diff --git a/.gn b/.gn
index d6f84df080..ea58dc547c 100644
--- a/.gn
+++ b/.gn
@@ -57,4 +57,7 @@ default_args = {
   enable_libaom = true
   gtest_enable_absl_printers = true
+
+  # Unlike Chromium, WebRTC still supports SDK 21.
+  default_min_sdk_version = 21
 }
diff --git a/.style.yapf b/.style.yapf
new file mode 100644
index 0000000000..c34341d425
--- /dev/null
+++ b/.style.yapf
@@ -0,0 +1,4 @@
+[style]
+based_on_style = pep8
+indent_width = 2
+column_limit = 80
\ No newline at end of file
diff --git a/.vpython b/.vpython
index df838dccf8..a57f82ff3e 100644
--- a/.vpython
+++ b/.vpython
@@ -9,7 +9,7 @@
 # CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
 # this never requires the end-user machine to have a working python extension
 # compilation environment. All of these packages are built using:
-# https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
+# https://chromium.googlesource.com/infra/infra/+/main/infra/tools/dockerbuild/
 #
 # All python scripts in the repo share this same spec, to avoid dependency
 # fragmentation.
@@ -20,7 +20,7 @@
 # vpython path/to/script.py some --arguments
 #
 # Read more about `vpython` and how to modify this file here:
-# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
+# https://chromium.googlesource.com/infra/infra/+/main/doc/users/vpython.md
 
 python_version: "2.7"
diff --git a/.vpython3 b/.vpython3
new file mode 100644
index 0000000000..99b1a0d8e9
--- /dev/null
+++ b/.vpython3
@@ -0,0 +1,82 @@
+# This is a vpython "spec" file.
+#
+# It describes patterns for python wheel dependencies of the python scripts in
+# the chromium repo, particularly for dependencies that have compiled components
+# (since pure-python dependencies can be easily vendored into third_party).
+#
+# When vpython is invoked, it finds this file and builds a python VirtualEnv,
+# containing all of the dependencies described in this file, fetching them from
+# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
+# this never requires the end-user machine to have a working python extension
+# compilation environment. All of these packages are built using:
+# https://chromium.googlesource.com/infra/infra/+/main/infra/tools/dockerbuild/
+#
+# All python scripts in the repo share this same spec, to avoid dependency
+# fragmentation.
+#
+# If you have depot_tools installed in your $PATH, you can invoke python scripts
+# in this repo by running them as you normally would run them, except
+# substituting `vpython` instead of `python` on the command line, e.g.:
+# vpython path/to/script.py some --arguments
+#
+# Read more about `vpython` and how to modify this file here:
+# https://chromium.googlesource.com/infra/infra/+/main/doc/users/vpython.md
+
+python_version: "3.8"
+
+# Used by:
+# third_party/catapult
+wheel: <
+  name: "infra/python/wheels/psutil/${vpython_platform}"
+  version: "version:5.8.0.chromium.2"
+>
+
+# Used by tools_webrtc/perf/webrtc_dashboard_upload.py.
+wheel: < + name: "infra/python/wheels/httplib2-py3" + version: "version:0.19.1" +> + +wheel: < + name: "infra/python/wheels/pyparsing-py2_py3" + version: "version:2.4.7" +> + + +# Used by: +# build/toolchain/win +wheel: < + name: "infra/python/wheels/pywin32/${vpython_platform}" + version: "version:300" + match_tag: < + platform: "win32" + > + match_tag: < + platform: "win_amd64" + > +> + +wheel: < + name: "infra/python/wheels/six-py2_py3" + version: "version:1.15.0" +> +wheel: < + name: "infra/python/wheels/pbr-py2_py3" + version: "version:3.0.0" +> +wheel: < + name: "infra/python/wheels/funcsigs-py2_py3" + version: "version:1.0.2" +> +wheel: < + name: "infra/python/wheels/mock-py2_py3" + version: "version:2.0.0" +> +wheel: < + name: "infra/python/wheels/protobuf-py2_py3" + version: "version:3.13.0" +> +wheel: < + name: "infra/python/wheels/requests-py2_py3" + version: "version:2.13.0" +> diff --git a/AUTHORS b/AUTHORS index b4d4100c6a..e4729a574b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -25,6 +25,7 @@ Ben Strong Berthold Herrmann Bob Withers Bridger Maxwell +Cheng Qian Christophe Dumez Chris Tserng Cody Barnes @@ -57,6 +58,7 @@ Jesús Leganés-Combarro Jiawei Ou Jie Mao Jiwon Kim +Johnny Wong Jose Antonio Olivera Ortega Keiichi Enomoto Kiran Thind @@ -68,6 +70,7 @@ Mallikarjuna Rao V Manish Jethani Martin Storsjo Matthias Liebig +Maksim Sisov Maxim Pavlov Maxim Potapov Michael Iedema @@ -76,6 +79,7 @@ Miguel Paris Mike Gilbert Min Wang Mo Zanaty +Niek van der Maas Pali Rohar Paul Kapustin Peng Yu @@ -87,16 +91,19 @@ Raman Budny Ramprakash Jelari Riku Voipio Robert Bares +Robert Mader Robert Nagy Ryan Yoakum Sarah Thompson Satender Saroha Saul Kravitz Sergio Garcia Murillo +Shuhai Peng Silviu Caragea Stefan Gula Stephan Hartmann Steve Reid +Takaaki Suzuki Tarun Chawla Todd Wong Tomas Popela @@ -107,6 +114,7 @@ Victor Costan Vladimir Beloborodov Xiaohong Xu Xiaolei Yu +Yaowen Guo Yura Yaroshevich Yuriy Pavlyshak Yusuke Suzuki @@ -114,6 +122,7 @@ Yusuke Suzuki # BEGIN organizations section. 8x8 Inc. <*@8x8.com> +8x8 Inc. <*@jitsi.org> 8x8 Inc. <*@sip-communicator.org> Agora IO <*@agora.io> ARM Holdings <*@arm.com> @@ -129,6 +138,7 @@ Microsoft Corporation <*@microsoft.com> MIPS Technologies <*@mips.com> Mozilla Foundation <*@mozilla.com> Netgem S.A. <*@netgem.com> +Nutanix Inc. <*@nutanix.com> NVIDIA Corporation <*@nvidia.com> Opera Software ASA <*@opera.com> Optical Tone Ltd <*@opticaltone.com> diff --git a/BUILD.gn b/BUILD.gn index bc51df7c07..fa4fcace49 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -12,6 +12,15 @@ # you add a new build file, there must be some path of dependencies from this # file to your new one or GN won't know about it. +# Use of visibility = clauses: +# The default visibility for all rtc_ targets is equivalent to "//*", or +# "all targets in webrtc can depend on this, nothing outside can". +# +# When overriding, the choices are: +# - visibility = [ "*" ] - public. Stuff outside webrtc can use this. +# - visibility = [ ":*" ] - directory private. +# As a general guideline, only targets in api/ should have public visibility. 
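For illustration, the three visibility choices described above look like this on concrete targets (a minimal GN sketch; the target names are hypothetical, not targets from this change):

  rtc_source_set("public_api_type") {
    # Public: code outside the webrtc tree may depend on this.
    visibility = [ "*" ]
    sources = [ "public_api_type.h" ]
  }

  rtc_source_set("directory_private_helper") {
    # Directory private: only targets defined in this same BUILD.gn file.
    visibility = [ ":*" ]
    sources = [ "directory_private_helper.cc" ]
  }

  rtc_source_set("webrtc_internal_util") {
    # No visibility override: defaults to the equivalent of "//*", so any
    # target inside webrtc may depend on it, nothing outside can.
    sources = [ "webrtc_internal_util.cc" ]
  }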
+ import("//build/config/linux/pkg_config.gni") import("//build/config/sanitizers/sanitizers.gni") import("//third_party/google_benchmark/buildconfig.gni") @@ -39,7 +48,6 @@ if (!build_with_chromium) { if (rtc_include_tests) { deps += [ ":rtc_unittests", - ":slow_tests", ":video_engine_tests", ":voip_unittests", ":webrtc_nonparallel_tests", @@ -118,12 +126,15 @@ config("common_inherited_config") { cflags = [] ldflags = [] - if (rtc_enable_symbol_export || is_component_build) { - defines = [ "WEBRTC_ENABLE_SYMBOL_EXPORT" ] + if (rtc_dlog_always_on) { + defines += [ "DLOG_ALWAYS_ON" ] } - if (build_with_mozilla) { - defines += [ "WEBRTC_MOZILLA_BUILD" ] + if (rtc_enable_symbol_export || is_component_build) { + defines += [ "WEBRTC_ENABLE_SYMBOL_EXPORT" ] + } + if (rtc_enable_objc_symbol_export) { + defines += [ "WEBRTC_ENABLE_OBJC_SYMBOL_EXPORT" ] } if (!rtc_builtin_ssl_root_certificates) { @@ -219,14 +230,6 @@ config("common_inherited_config") { } } -# TODO(bugs.webrtc.org/9693): Remove the possibility to suppress this warning -# as soon as WebRTC compiles without it. -config("no_exit_time_destructors") { - if (is_clang) { - cflags = [ "-Wno-exit-time-destructors" ] - } -} - # TODO(bugs.webrtc.org/9693): Remove the possibility to suppress this warning # as soon as WebRTC compiles without it. config("no_global_constructors") { @@ -266,6 +269,10 @@ config("common_config") { defines += [ "RTC_ENABLE_VP9" ] } + if (rtc_include_dav1d_in_internal_decoder_factory) { + defines += [ "RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY" ] + } + if (rtc_enable_sctp) { defines += [ "WEBRTC_HAVE_SCTP" ] } @@ -302,7 +309,10 @@ config("common_config") { defines += [ "WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE" ] } - cflags = [] + # TODO(webrtc:13219): Fix -Wshadow instances and enable. + if (is_clang) { + cflags += [ "-Wno-shadow" ] + } if (build_with_chromium) { defines += [ @@ -340,27 +350,13 @@ config("common_config") { if (is_clang) { cflags += [ "-Wc++11-narrowing", - "-Wimplicit-fallthrough", - "-Wthread-safety", - "-Winconsistent-missing-override", "-Wundef", ] - # use_xcode_clang only refers to the iOS toolchain, host binaries use - # chromium's clang always. - if (!is_nacl && - (!use_xcode_clang || current_toolchain == host_toolchain)) { - # Flags NaCl (Clang 3.7) and Xcode 7.3 (Clang clang-703.0.31) do not - # recognize. + if (!is_nacl) { + # Flags NaCl (Clang 3.7) do not recognize. cflags += [ "-Wunused-lambda-capture" ] } - - if (use_xcode_clang) { - # This may be removed if the clang version in xcode > 12.4 includes the - # fix https://reviews.llvm.org/D73007. - # https://bugs.llvm.org/show_bug.cgi?id=44556 - cflags += [ "-Wno-range-loop-analysis" ] - } } if (is_win && !is_clang) { @@ -569,7 +565,9 @@ if (rtc_include_tests && !build_with_chromium) { "rtc_base/experiments:experiments_unittests", "rtc_base/system:file_wrapper_unittests", "rtc_base/task_utils:pending_task_safety_flag_unittests", + "rtc_base/task_utils:repeating_task_unittests", "rtc_base/task_utils:to_queued_task_unittests", + "rtc_base/units:units_unittests", "sdk:sdk_tests", "test:rtp_test_utils", "test:test_main", @@ -607,17 +605,6 @@ if (rtc_include_tests && !build_with_chromium) { } } - # This runs tests that must run in real time and therefore can take some - # time to execute. They are in a separate executable to avoid making the - # regular unittest suite too slow to run frequently. 
- rtc_test("slow_tests") { - testonly = true - deps = [ - "rtc_base/task_utils:repeating_task_unittests", - "test:test_main", - ] - } - # TODO(pbos): Rename test suite, this is no longer "just" for video targets. video_engine_tests_resources = [ "resources/foreman_cif_short.yuv", @@ -644,6 +631,7 @@ if (rtc_include_tests && !build_with_chromium) { "test:test_common", "test:test_main", "test:video_test_common", + "video:video_legacy_tests", "video:video_tests", "video/adaptation:video_adaptation_tests", ] @@ -740,6 +728,9 @@ group("poison_audio_codecs") { group("poison_default_task_queue") { } +group("poison_default_echo_detector") { +} + group("poison_rtc_json") { } diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 7b48e69e7a..f6b7d0f6a3 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -61,8 +61,8 @@ The decisions of the WebRTC community managers may be appealed via community-app ## Acknowledgements This Code of Conduct is based on Contributor Covenant, version 1.4, -available [here](http://contributor-covenant.org/version/1/4) and [Chromium](https://chromium.googlesource.com/chromium/src/+/master/CODE_OF_CONDUCT.md) +available [here](http://contributor-covenant.org/version/1/4) and [Chromium](https://chromium.googlesource.com/chromium/src/+/main/CODE_OF_CONDUCT.md) ## License -This Code of Conduct is available for reuse under the Creative Commons Zero (CC0) license. \ No newline at end of file +This Code of Conduct is available for reuse under the Creative Commons Zero (CC0) license. diff --git a/DEPS b/DEPS index c24608a98a..9bc9237a85 100644 --- a/DEPS +++ b/DEPS @@ -10,40 +10,43 @@ vars = { # chromium waterfalls. More info at: crbug.com/570091. 'checkout_configuration': 'default', 'checkout_instrumented_libraries': 'checkout_linux and checkout_configuration == "default"', - 'chromium_revision': '6d8828f6a6eea769a05fa1c0b7acf10aca631d4a', + 'chromium_revision': '1d65e161b823702fbaed6c1e74bf01008cad2edf', # Keep the Chromium default of generating location tags. 'generate_location_tags': True, + + # ResultDB version + 'resultdb_version': 'git_revision:735a8a662d3874d8b1d795a40e46ea0f57b52758', } deps = { # TODO(kjellander): Move this to be Android-only once the libevent dependency # in base/third_party/libevent is solved. 'src/base': - 'https://chromium.googlesource.com/chromium/src/base@e1acc6a30942360d4789d6c245cf7933e7e9bbec', + 'https://chromium.googlesource.com/chromium/src/base@5de2454055b98b7cedb8b3ca31a5e73fd7f2329d', 'src/build': - 'https://chromium.googlesource.com/chromium/src/build@826926008327af276adbaafcfa92b525eb5bf326', + 'https://chromium.googlesource.com/chromium/src/build@bcaab68639cce04f2c52c6d65f5199af6d763d75', 'src/buildtools': - 'https://chromium.googlesource.com/chromium/src/buildtools@2500c1d8f3a20a66a7cbafe3f69079a2edb742dd', + 'https://chromium.googlesource.com/chromium/src/buildtools@2a745cc87d28e53996469189bc7661811d6f9c34', # Gradle 6.6.1. Used for testing Android Studio project generation for WebRTC. 
'src/examples/androidtests/third_party/gradle': { 'url': 'https://chromium.googlesource.com/external/github.com/gradle/gradle.git@f2d1fb54a951d8b11d25748e4711bec8d128d7e3', 'condition': 'checkout_android', }, 'src/ios': { - 'url': 'https://chromium.googlesource.com/chromium/src/ios@695a3541172406518e45c377048956a3e5270d7c', + 'url': 'https://chromium.googlesource.com/chromium/src/ios@9adcdab41620a7c899745609e4083d023e449d1e', 'condition': 'checkout_ios', }, 'src/testing': - 'https://chromium.googlesource.com/chromium/src/testing@d749d1b98b475ea15face1c9d2311ed6b8e4b91f', + 'https://chromium.googlesource.com/chromium/src/testing@67a0c97ff4b57f30d97825107bee9e112a020f79', 'src/third_party': - 'https://chromium.googlesource.com/chromium/src/third_party@c1d40d8b399db4c5ebab5e5022a002dca5b3dbb2', + 'https://chromium.googlesource.com/chromium/src/third_party@f30eafbf5286fc58f85215253b7807c2b7125361', 'src/buildtools/linux64': { 'packages': [ { 'package': 'gn/gn/linux-amd64', - 'version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8', + 'version': 'git_revision:0725d7827575b239594fbc8fd5192873a1d62f44', } ], 'dep_type': 'cipd', @@ -53,7 +56,7 @@ deps = { 'packages': [ { 'package': 'gn/gn/mac-${{arch}}', - 'version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8', + 'version': 'git_revision:0725d7827575b239594fbc8fd5192873a1d62f44', } ], 'dep_type': 'cipd', @@ -63,7 +66,7 @@ deps = { 'packages': [ { 'package': 'gn/gn/windows-amd64', - 'version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8', + 'version': 'git_revision:0725d7827575b239594fbc8fd5192873a1d62f44', } ], 'dep_type': 'cipd', @@ -71,13 +74,13 @@ deps = { }, 'src/buildtools/clang_format/script': - 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/clang/tools/clang-format.git@99803d74e35962f63a775f29477882afd4d57d94', + 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/clang/tools/clang-format.git@e435ad79c17b1888b34df88d6a30a094936e3836', 'src/buildtools/third_party/libc++/trunk': 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx.git@79a2e924d96e2fc1e4b937c42efd08898fa472d7', 'src/buildtools/third_party/libc++abi/trunk': - 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi.git@cb34896ebd62f93f708ff9aad26159cf11dde6f4', + 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi.git@77c52e2f68f76dbe661420d85f51521805990ae2', 'src/buildtools/third_party/libunwind/trunk': - 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libunwind.git@e7ac0f84fc2f2f8bd2ad151a7348e7120d77648a', + 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libunwind.git@b86911d6e7455c4be2ddbc390803aecad19cfc3a', 'src/tools/clang/dsymutil': { 'packages': [ @@ -93,19 +96,28 @@ deps = { 'src/third_party/android_system_sdk': { 'packages': [ { - 'package': 'chromium/third_party/android_system_sdk', - 'version': 'no8ss5nRg6uYDM08HboypuIQuix7bS1kVqRGyWmwP-YC', + 'package': 'chromium/third_party/android_system_sdk/public', + 'version': 'oSfDIvHlCa6W0gS79Q5OOfB9E4eBg3uAvi3BEDN21U0C', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, + 'src/tools/resultdb': { + 'packages': [ + { + 'package': 'infra/tools/result_adapter/${{platform}}', + 'version': Var('resultdb_version'), + }, + ], + 'dep_type': 'cipd', + }, 'src/third_party/android_build_tools/aapt2': { 'packages': [ { 'package': 'chromium/third_party/android_build_tools/aapt2', - 'version': 
'R2k5wwOlIaS6sjv2TIyHotiPJod-6KqnZO8NH-KFK8sC', + 'version': '0yR8wK_fSMgdVKwnx4nRPi-amaLV7Kcr4Os6mg_DGI4C', }, ], 'condition': 'checkout_android', @@ -115,8 +127,8 @@ deps = { 'src/third_party/android_build_tools/bundletool': { 'packages': [ { - 'package': 'chromium/third_party/android_tools_bundletool', - 'version': 'FIj7ed-law2zMv41QhjEXabkaJ7aN2ztmE0Tv3Z_gFUC', + 'package': 'chromium/third_party/android_build_tools/bundletool', + 'version': '2ZcLVDxyRwp8FzpeYLtLT0TfSRweZxvwh1-Kx1jZ_FoC', }, ], 'condition': 'checkout_android', @@ -124,11 +136,11 @@ deps = { }, 'src/third_party/boringssl/src': - 'https://boringssl.googlesource.com/boringssl.git@a10017c548b0805eb98e7847c37370dbd37cd8d6', + 'https://boringssl.googlesource.com/boringssl.git@123eaaef26abc278f53ae338e9c758eb01c70b08', 'src/third_party/breakpad/breakpad': - 'https://chromium.googlesource.com/breakpad/breakpad.git@b95c4868b10f69e642666742233aede1eb653012', + 'https://chromium.googlesource.com/breakpad/breakpad.git@08bd844599bf04c71707e8f59a8013a941264695', 'src/third_party/catapult': - 'https://chromium.googlesource.com/catapult.git@3345f09ed65020a999e108ea37d30b49c87e14ed', + 'https://chromium.googlesource.com/catapult.git@25f38be662aec0c8b1509024001e5cc5254a363c', 'src/third_party/ced/src': { 'url': 'https://chromium.googlesource.com/external/github.com/google/compact_enc_det.git@ba412eaaacd3186085babcd901679a48863c7dd5', }, @@ -137,9 +149,9 @@ deps = { 'src/third_party/crc32c/src': 'https://chromium.googlesource.com/external/github.com/google/crc32c.git@fa5ade41ee480003d9c5af6f43567ba22e4e17e6', 'src/third_party/depot_tools': - 'https://chromium.googlesource.com/chromium/tools/depot_tools.git@a806594b95a39141fdbf1f359087a44ffb2deaaf', + 'https://chromium.googlesource.com/chromium/tools/depot_tools.git@1b93e14c92416185694f8764be425f82fa0d4242', 'src/third_party/ffmpeg': - 'https://chromium.googlesource.com/chromium/third_party/ffmpeg.git@05c195662f0527913811827ba253cb93758ea4c0', + 'https://chromium.googlesource.com/chromium/third_party/ffmpeg.git@574c39cce3231c69bc9a02ac475c27d944bdb113', 'src/third_party/findbugs': { 'url': 'https://chromium.googlesource.com/chromium/deps/findbugs.git@4275d9ac8610db6b1bc9a5e887f97e41b33fac67', 'condition': 'checkout_android', @@ -150,11 +162,11 @@ deps = { 'condition': 'checkout_linux', }, 'src/third_party/freetype/src': - 'https://chromium.googlesource.com/chromium/src/third_party/freetype2.git@d3dc2da9b27af5b90575d62989389cc65fe7977c', + 'https://chromium.googlesource.com/chromium/src/third_party/freetype2.git@24db55ecb81ca726b9c7e12e37d54a986c84f014', 'src/third_party/harfbuzz-ng/src': - 'https://chromium.googlesource.com/external/github.com/harfbuzz/harfbuzz.git@cc9bb294919e846ef8a0731b5e9f304f95ef3bb8', + 'https://chromium.googlesource.com/external/github.com/harfbuzz/harfbuzz.git@0acf466c44143de2e9b9cc0375cb25ec67cb132f', 'src/third_party/google_benchmark/src': { - 'url': 'https://chromium.googlesource.com/external/github.com/google/benchmark.git@e991355c02b93fe17713efe04cbc2e278e00fdbd', + 'url': 'https://chromium.googlesource.com/external/github.com/google/benchmark.git@f730846b0a3c0dc0699978846fb14ffb2fad0bdc', }, # WebRTC-only dependency (not present in Chromium). 
'src/third_party/gtest-parallel': @@ -170,9 +182,9 @@ deps = { 'dep_type': 'cipd', }, 'src/third_party/googletest/src': - 'https://chromium.googlesource.com/external/github.com/google/googletest.git@4ec4cd23f486bf70efcc5d2caa40f24368f752e3', + 'https://chromium.googlesource.com/external/github.com/google/googletest.git@43efa0a4efd40c78b9210d15373112081899a97c', 'src/third_party/icu': { - 'url': 'https://chromium.googlesource.com/chromium/deps/icu.git@b9dfc58bf9b02ea0365509244aca13841322feb0', + 'url': 'https://chromium.googlesource.com/chromium/deps/icu.git@2e0f2989441ec2f55abec30f48e89981dbac2c34', }, 'src/third_party/jdk': { 'packages': [ @@ -204,21 +216,23 @@ deps = { 'src/third_party/libFuzzer/src': 'https://chromium.googlesource.com/chromium/llvm-project/compiler-rt/lib/fuzzer.git@debe7d2d1982e540fbd6bd78604bf001753f9e74', 'src/third_party/libjpeg_turbo': - 'https://chromium.googlesource.com/chromium/deps/libjpeg_turbo.git@e9e400e0af31baf72d235655850bc00e55b6c145', + 'https://chromium.googlesource.com/chromium/deps/libjpeg_turbo.git@02959c3ee17abacfd1339ec22ea93301292ffd56', 'src/third_party/libsrtp': 'https://chromium.googlesource.com/chromium/deps/libsrtp.git@5b7c744eb8310250ccc534f3f86a2015b3887a0a', + 'src/third_party/dav1d/libdav1d': + 'https://chromium.googlesource.com/external/github.com/videolan/dav1d.git@cce2b0564d3b0aa1a5e4ec0c561b6a1b8497cebb', 'src/third_party/libaom/source/libaom': - 'https://aomedia.googlesource.com/aom.git@aba245dde334bd51a20940eb009fa46b6ffd4511', + 'https://aomedia.googlesource.com/aom.git@c9feb209a41343981c215072583d385bdda2bab1', 'src/third_party/libunwindstack': { - 'url': 'https://chromium.googlesource.com/chromium/src/third_party/libunwindstack.git@b34a0059a648f179ef05da2c0927f564bdaea2b3', + 'url': 'https://chromium.googlesource.com/chromium/src/third_party/libunwindstack.git@6868358481bb1e5e20d155c1084dc436c88b5e6b', 'condition': 'checkout_android', }, 'src/third_party/perfetto': - 'https://android.googlesource.com/platform/external/perfetto.git@aecbd80f576686b67e29bdfae8c9c03bb9ce1996', + 'https://android.googlesource.com/platform/external/perfetto.git@605bd3a3ad96c56f98780e7b4b6b247e1d9e0ae9', 'src/third_party/libvpx/source/libvpx': - 'https://chromium.googlesource.com/webm/libvpx.git@eebc5cd487a89c51ba148f6d6ac45779970f72d7', + 'https://chromium.googlesource.com/webm/libvpx.git@51415c4076578d3cbc32fcd0d683161c3e887814', 'src/third_party/libyuv': - 'https://chromium.googlesource.com/libyuv/libyuv.git@49ebc996aa8c4bdf89c1b5ea461eb677234c61cc', + 'https://chromium.googlesource.com/libyuv/libyuv.git@b4ddbaf549a1bf5572bf703fd2862d1eb7380c6a', 'src/third_party/lss': { 'url': 'https://chromium.googlesource.com/linux-syscall-support.git@92a65a8f5d705d1928874420c8d0d15bde8c89e5', 'condition': 'checkout_android or checkout_linux', @@ -230,16 +244,16 @@ deps = { # Used by boringssl. 
'src/third_party/nasm': { - 'url': 'https://chromium.googlesource.com/chromium/deps/nasm.git@e9be5fd6d723a435ca2da162f9e0ffcb688747c1' + 'url': 'https://chromium.googlesource.com/chromium/deps/nasm.git@9215e8e1d0fe474ffd3e16c1a07a0f97089e6224' }, 'src/third_party/openh264/src': - 'https://chromium.googlesource.com/external/github.com/cisco/openh264@3dd5b80bc4f172dd82925bb259cb7c82348409c5', + 'https://chromium.googlesource.com/external/github.com/cisco/openh264@b52786888ddce9d6bc06b7825ba9bffc65924e0c', 'src/third_party/r8': { 'packages': [ { 'package': 'chromium/third_party/r8', - 'version': 'Nu_mvQJe34CotIXadFlA3w732CJ9EvQGuVs4udcZedAC', + 'version': 'cEv1yyfxfmP_MaZrG22cR7YPc7hehgHAZd82lRx0DFAC', }, ], 'condition': 'checkout_android', @@ -264,16 +278,14 @@ deps = { 'condition': 'checkout_android', }, 'src/third_party/usrsctp/usrsctplib': - 'https://chromium.googlesource.com/external/github.com/sctplab/usrsctp@1ade45cbadfd19298d2c47dc538962d4425ad2dd', + 'https://chromium.googlesource.com/external/github.com/sctplab/usrsctp@62d7d0c928c9a040dce96aa2f16c00e7e67d59cb', # Dependency used by libjpeg-turbo. 'src/third_party/yasm/binaries': { 'url': 'https://chromium.googlesource.com/chromium/deps/yasm/binaries.git@52f9b3f4b0aa06da24ef8b123058bb61ee468881', 'condition': 'checkout_win', }, 'src/tools': - 'https://chromium.googlesource.com/chromium/src/tools@1a00526b21d46b8b86f13add37003fd33885f32b', - 'src/tools/swarming_client': - 'https://chromium.googlesource.com/infra/luci/client-py.git@a32a1607f6093d338f756c7e7c7b4333b0c50c9c', + 'https://chromium.googlesource.com/chromium/src/tools@55c693b555dff2de2756ae4cd2e1322737879a6f', 'src/third_party/accessibility_test_framework': { 'packages': [ @@ -364,7 +376,7 @@ deps = { }, 'src/third_party/android_ndk': { - 'url': 'https://chromium.googlesource.com/android_ndk.git@401019bf85744311b26c88ced255cd53401af8b7', + 'url': 'https://chromium.googlesource.com/android_ndk.git@9644104c8cf85bf1bdce5b1c0691e9778572c3f8', 'condition': 'checkout_android', }, @@ -372,7 +384,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/androidx', - 'version': '-umIXLPTAdxRy2iaK4QFSeOf4t7PAKglJP7ggvWhfRwC', + 'version': 'EqXyh_ypMvWmNBJPk_Xk9bp-pc8lerSz48cySEsnbMwC', }, ], 'condition': 'checkout_android', @@ -382,12 +394,12 @@ deps = { 'src/third_party/android_sdk/public': { 'packages': [ { - 'package': 'chromium/third_party/android_sdk/public/build-tools/30.0.1', - 'version': '8LZujEmLjSh0g3JciDA3cslSptxKs9HOa_iUPXkOeYQC', + 'package': 'chromium/third_party/android_sdk/public/build-tools/31.0.0', + 'version': 'tRoD45SCi7UleQqSV7MrMQO1_e5P8ysphkCcj6z_cCQC', }, { 'package': 'chromium/third_party/android_sdk/public/emulator', - 'version': 'A4EvXZUIuQho0QRDJopMUpgyp6NA3aiDQjGKPUKbowMC', + 'version': 'gMHhUuoQRKfxr-MBn3fNNXZtkAVXtOwMwT7kfx8jkIgC', }, { 'package': 'chromium/third_party/android_sdk/public/extras', @@ -399,19 +411,19 @@ deps = { }, { 'package': 'chromium/third_party/android_sdk/public/platform-tools', - 'version': '8tF0AOj7Dwlv4j7_nfkhxWB0jzrvWWYjEIpirt8FIWYC', + 'version': 'g7n_-r6yJd_SGRklujGB1wEt8iyr77FZTUJVS9w6O34C', }, { - 'package': 'chromium/third_party/android_sdk/public/platforms/android-30', - 'version': 'YMUu9EHNZ__2Xcxl-KsaSf-dI5TMt_P62IseUVsxktMC', + 'package': 'chromium/third_party/android_sdk/public/platforms/android-31', + 'version': 'lL3IGexKjYlwjO_1Ga-xwxgwbE_w-lmi2Zi1uOlWUIAC', }, { - 'package': 'chromium/third_party/android_sdk/public/sources/android-29', - 'version': '4gxhM8E62bvZpQs7Q3d0DinQaW0RLCIefhXrQBFkNy8C', + 'package': 
'chromium/third_party/android_sdk/public/sources/android-31', + 'version': '_a_BcnANjPYw5mSKlNHa7GFY8yc1kdqj2rmQgac7yUcC', }, { 'package': 'chromium/third_party/android_sdk/public/cmdline-tools', - 'version': 'V__2Ycej-H2-6AcXX5A3gi7sIk74SuN44PBm2uC_N1sC', + 'version': 'Ez2NWws2SJYCF6qw2O-mSCqK6424l3ZdSTpppLyVR_cC', }, ], 'condition': 'checkout_android', @@ -444,7 +456,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/robolectric', - 'version': 'iC6RDM5EH3GEAzR-1shW_Mg0FeeNE5shq1okkFfuuNQC', + 'version': 'Kdd6dNFAKqj9g9Bsfo2z1zQr52Vk60EL_wb9Bf2c8rcC', }, ], 'condition': 'checkout_android', @@ -466,7 +478,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/turbine', - 'version': 'Om6yIEXgJxuqghErK29h9RcMH6VaymMbxwScwXmcN6EC', + 'version': 'ZweGxnankyMPNqORbYcApzrOXpyg-9XcK7_-qdjELlcC', }, ], 'condition': 'checkout_android', @@ -477,15 +489,11 @@ deps = { 'packages': [ { 'package': 'infra/tools/luci/isolate/${{platform}}', - 'version': 'git_revision:2ac8bd9cbc20824bb04a39b0f1b77178ace930b3', - }, - { - 'package': 'infra/tools/luci/isolated/${{platform}}', - 'version': 'git_revision:2ac8bd9cbc20824bb04a39b0f1b77178ace930b3', + 'version': 'git_revision:462d0a9cdbe947cd652fcd0c54f64ebc712858a5', }, { 'package': 'infra/tools/luci/swarming/${{platform}}', - 'version': 'git_revision:2ac8bd9cbc20824bb04a39b0f1b77178ace930b3', + 'version': 'git_revision:462d0a9cdbe947cd652fcd0c54f64ebc712858a5', }, ], 'dep_type': 'cipd', @@ -952,7 +960,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_tools_common', - 'version': 'version:2@30.0.0-alpha10.cr0', + 'version': 'version:2@30.2.0-beta01.cr0', }, ], 'condition': 'checkout_android', @@ -985,7 +993,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_tools_layoutlib_layoutlib_api', - 'version': 'version:2@30.0.0-alpha10.cr0', + 'version': 'version:2@30.2.0-beta01.cr0', }, ], 'condition': 'checkout_android', @@ -996,7 +1004,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_tools_sdk_common', - 'version': 'version:2@30.0.0-alpha10.cr0', + 'version': 'version:2@30.2.0-beta01.cr0', }, ], 'condition': 'checkout_android', @@ -1271,7 +1279,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_material_material', - 'version': 'version:2@1.4.0-rc01.cr0', + 'version': 'version:2@1.6.0-alpha01.cr0', }, ], 'condition': 'checkout_android', @@ -1293,7 +1301,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_auto_auto_common', - 'version': 'version:2@0.10.cr0', + 'version': 'version:2@1.1.2.cr0', }, ], 'condition': 'checkout_android', @@ -1414,7 +1422,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_annotation', - 'version': 'version:2@2.7.1.cr0', + 'version': 'version:2@2.10.0.cr0', }, ], 'condition': 'checkout_android', @@ -1425,7 +1433,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_annotations', - 'version': 'version:2@2.7.1.cr0', + 'version': 'version:2@2.10.0.cr0', }, ], 'condition': 'checkout_android', @@ -1436,7 +1444,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_check_api', - 'version': 'version:2@2.7.1.cr0', + 'version': 'version:2@2.10.0.cr0', }, ], 'condition': 'checkout_android', @@ -1447,7 +1455,7 @@ deps = { 'packages': [ { 
'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_core', - 'version': 'version:2@2.7.1.cr0', + 'version': 'version:2@2.10.0.cr0', }, ], 'condition': 'checkout_android', @@ -1458,7 +1466,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_type_annotations', - 'version': 'version:2@2.7.1.cr0', + 'version': 'version:2@2.10.0.cr0', }, ], 'condition': 'checkout_android', @@ -1608,6 +1616,17 @@ deps = { 'dep_type': 'cipd', }, + 'src/third_party/android_deps/libs/com_google_flatbuffers_flatbuffers_java': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_flatbuffers_flatbuffers_java', + 'version': 'version:2@2.0.3.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + 'src/third_party/android_deps/libs/com_google_googlejavaformat_google_java_format': { 'packages': [ { @@ -1634,7 +1653,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_guava_guava', - 'version': 'version:2@30.1-jre.cr0', + 'version': 'version:2@31.0-jre.cr0', }, ], 'condition': 'checkout_android', @@ -1645,7 +1664,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_guava_guava_android', - 'version': 'version:2@30.1-android.cr0', + 'version': 'version:2@31.0-android.cr0', }, ], 'condition': 'checkout_android', @@ -1689,7 +1708,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_protobuf_protobuf_javalite', - 'version': 'version:2@3.13.0.cr0', + 'version': 'version:2@3.19.3.cr0', }, ], 'condition': 'checkout_android', @@ -2019,18 +2038,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_qual', - 'version': 'version:2@3.8.0.cr0', + 'version': 'version:2@3.12.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_checkerframework_dataflow_shaded': { + 'src/third_party/android_deps/libs/org_checkerframework_dataflow_errorprone': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_dataflow_shaded', - 'version': 'version:2@3.11.0.cr0', + 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_dataflow_errorprone', + 'version': 'version:2@3.15.0.cr0', }, ], 'condition': 'checkout_android', @@ -2107,7 +2126,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib', - 'version': 'version:2@1.5.10.cr0', + 'version': 'version:2@1.6.10.cr0', }, ], 'condition': 'checkout_android', @@ -2118,7 +2137,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common', - 'version': 'version:2@1.5.10.cr0', + 'version': 'version:2@1.6.10.cr0', }, ], 'condition': 'checkout_android', @@ -2389,7 +2408,7 @@ hooks = [ 'name': 'landmines', 'pattern': '.', 'action': [ - 'python', + 'python3', 'src/build/landmines.py', '--landmine-scripts', 'src/tools_webrtc/get_landmines.py', @@ -2403,7 +2422,7 @@ hooks = [ 'name': 'disable_depot_tools_selfupdate', 'pattern': '.', 'action': [ - 'python', + 'python3', 'src/third_party/depot_tools/update_depot_tools_toggle.py', '--disable', ], @@ -2412,14 +2431,14 @@ hooks = [ 'name': 'sysroot_arm', 'pattern': '.', 'condition': 'checkout_linux and checkout_arm', - 'action': ['python', 'src/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 
'src/build/linux/sysroot_scripts/install-sysroot.py', '--arch=arm'], }, { 'name': 'sysroot_arm64', 'pattern': '.', 'condition': 'checkout_linux and checkout_arm64', - 'action': ['python', 'src/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'src/build/linux/sysroot_scripts/install-sysroot.py', '--arch=arm64'], }, { @@ -2427,7 +2446,7 @@ hooks = [ 'pattern': '.', 'condition': 'checkout_linux and (checkout_x86 or checkout_x64)', # TODO(mbonadei): change to --arch=x86. - 'action': ['python', 'src/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'src/build/linux/sysroot_scripts/install-sysroot.py', '--arch=i386'], }, { @@ -2435,7 +2454,7 @@ hooks = [ 'pattern': '.', 'condition': 'checkout_linux and checkout_mips', # TODO(mbonadei): change to --arch=mips. - 'action': ['python', 'src/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'src/build/linux/sysroot_scripts/install-sysroot.py', '--arch=mipsel'], }, { @@ -2443,7 +2462,7 @@ hooks = [ 'pattern': '.', 'condition': 'checkout_linux and checkout_x64', # TODO(mbonadei): change to --arch=x64. - 'action': ['python', 'src/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'src/build/linux/sysroot_scripts/install-sysroot.py', '--arch=amd64'], }, { @@ -2451,7 +2470,7 @@ hooks = [ 'name': 'ciopfs_linux', 'pattern': '.', 'condition': 'checkout_win and host_os == "linux"', - 'action': [ 'python', + 'action': [ 'python3', 'src/third_party/depot_tools/download_from_google_storage.py', '--no_resume', '--no_auth', @@ -2464,26 +2483,26 @@ hooks = [ 'name': 'win_toolchain', 'pattern': '.', 'condition': 'checkout_win', - 'action': ['python', 'src/build/vs_toolchain.py', 'update', '--force'], + 'action': ['python3', 'src/build/vs_toolchain.py', 'update', '--force'], }, { # Update the Mac toolchain if necessary. 'name': 'mac_toolchain', 'pattern': '.', 'condition': 'checkout_mac', - 'action': ['python', 'src/build/mac_toolchain.py'], + 'action': ['python3', 'src/build/mac_toolchain.py'], }, { # Note: On Win, this should run after win_toolchain, as it may use it. 'name': 'clang', 'pattern': '.', - 'action': ['python', 'src/tools/clang/scripts/update.py'], + 'action': ['python3', 'src/tools/clang/scripts/update.py'], }, { # Update LASTCHANGE. 'name': 'lastchange', 'pattern': '.', - 'action': ['python', 'src/build/util/lastchange.py', + 'action': ['python3', 'src/build/util/lastchange.py', '-o', 'src/build/util/LASTCHANGE'], }, # Pull clang-format binaries using checked-in hashes. 
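The hook actions in this region all move from 'python' to 'python3' as the toolchain drops Python 2. For reference, a migrated gclient hook has this shape (a minimal sketch; 'example_check' and its script path are hypothetical, not entries from this change). gclient evaluates DEPS as Python literals and runs each matching action during `gclient sync` / `gclient runhooks`:

  hooks = [
    {
      'name': 'example_check',
      'pattern': '.',                 # Run whenever any path changes.
      'condition': 'checkout_linux',  # Optional; gates the hook per checkout.
      # The interpreter is now spelled 'python3' instead of 'python'.
      'action': ['python3', 'src/tools_webrtc/example_check.py', '--quiet'],
    },
  ]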
@@ -2528,7 +2547,7 @@ hooks = [ 'name': 'rc_win', 'pattern': '.', 'condition': 'checkout_win and host_os == "win"', - 'action': [ 'python', + 'action': [ 'python3', 'src/third_party/depot_tools/download_from_google_storage.py', '--no_resume', '--no_auth', @@ -2540,7 +2559,7 @@ hooks = [ 'name': 'rc_mac', 'pattern': '.', 'condition': 'checkout_win and host_os == "mac"', - 'action': [ 'python', + 'action': [ 'python3', 'src/third_party/depot_tools/download_from_google_storage.py', '--no_resume', '--no_auth', @@ -2552,7 +2571,7 @@ hooks = [ 'name': 'rc_linux', 'pattern': '.', 'condition': 'checkout_win and host_os == "linux"', - 'action': [ 'python', + 'action': [ 'python3', 'src/third_party/depot_tools/download_from_google_storage.py', '--no_resume', '--no_auth', @@ -2575,24 +2594,24 @@ hooks = [ 'name': 'msan_chained_origins', 'pattern': '.', 'condition': 'checkout_instrumented_libraries', - 'action': [ 'python', + 'action': [ 'python3', 'src/third_party/depot_tools/download_from_google_storage.py', "--no_resume", "--no_auth", "--bucket", "chromium-instrumented-libraries", - "-s", "src/third_party/instrumented_libraries/binaries/msan-chained-origins-trusty.tgz.sha1", + "-s", "src/third_party/instrumented_libraries/binaries/msan-chained-origins.tgz.sha1", ], }, { 'name': 'msan_no_origins', 'pattern': '.', 'condition': 'checkout_instrumented_libraries', - 'action': [ 'python', + 'action': [ 'python3', 'src/third_party/depot_tools/download_from_google_storage.py', "--no_resume", "--no_auth", "--bucket", "chromium-instrumented-libraries", - "-s", "src/third_party/instrumented_libraries/binaries/msan-no-origins-trusty.tgz.sha1", + "-s", "src/third_party/instrumented_libraries/binaries/msan-no-origins.tgz.sha1", ], }, { @@ -2611,7 +2630,7 @@ hooks = [ 'name': 'Generate component metadata for tests', 'pattern': '.', 'action': [ - 'vpython', + 'vpython3', 'src/testing/generate_location_tags.py', '--out', 'src/testing/location_tags.json', @@ -2621,8 +2640,8 @@ hooks = [ { 'name': 'vpython_common', 'pattern': '.', - 'action': [ 'vpython', - '-vpython-spec', 'src/.vpython', + 'action': [ 'vpython3', + '-vpython-spec', 'src/.vpython3', '-vpython-tool', 'install', ], }, @@ -2656,10 +2675,12 @@ include_rules = [ "+absl/base/config.h", "+absl/base/const_init.h", "+absl/base/macros.h", + "+absl/cleanup/cleanup.h", "+absl/container/inlined_vector.h", "+absl/functional/bind_front.h", "+absl/memory/memory.h", "+absl/meta/type_traits.h", + "+absl/numeric/bits.h", "+absl/strings/ascii.h", "+absl/strings/match.h", "+absl/strings/str_replace.h", diff --git a/ENG_REVIEW_OWNERS b/ENG_REVIEW_OWNERS index b06ad40d0f..c509a278c1 100644 --- a/ENG_REVIEW_OWNERS +++ b/ENG_REVIEW_OWNERS @@ -7,4 +7,3 @@ danilchap@webrtc.org mbonadei@webrtc.org -phoglund@webrtc.org diff --git a/OWNERS b/OWNERS index 587c130ed7..4702befd9e 100644 --- a/OWNERS +++ b/OWNERS @@ -1,17 +1,20 @@ henrika@webrtc.org hta@webrtc.org -juberti@webrtc.org mflodman@webrtc.org stefan@webrtc.org tommi@webrtc.org per-file .gitignore=* per-file .gn=mbonadei@webrtc.org -per-file *.gn=mbonadei@webrtc.org +per-file BUILD.gn=mbonadei@webrtc.org +per-file .../BUILD.gn=mbonadei@webrtc.org per-file *.gni=mbonadei@webrtc.org +per-file .../*.gni=mbonadei@webrtc.org per-file .vpython=mbonadei@webrtc.org +per-file .vpython3=mbonadei@webrtc.org per-file AUTHORS=* per-file DEPS=* per-file pylintrc=mbonadei@webrtc.org per-file WATCHLISTS=* per-file native-api.md=mbonadei@webrtc.org -per-file *.lua=titovartem@webrtc.org +per-file ....lua=titovartem@webrtc.org +per-file 
.style.yapf=jleconte@webrtc.org
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 21875f61af..b83f262e3d 100755
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env vpython3
+
 # Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 #
 # Use of this source code is governed by a BSD-style license
@@ -13,6 +15,9 @@
 from collections import defaultdict
 from contextlib import contextmanager
 
+# Runs PRESUBMIT.py in py3 mode when invoked by git cl presubmit.
+USE_PYTHON3 = True
+
 # Files and directories that are *skipped* by cpplint in the presubmit script.
 CPPLINT_EXCEPTIONS = [
     'api/video_codecs/video_decoder.h',
@@ -31,6 +36,9 @@
     'modules/video_capture',
     'p2p/base/pseudo_tcp.cc',
     'p2p/base/pseudo_tcp.h',
+    'PRESUBMIT.py',
+    'presubmit_test_mocks.py',
+    'presubmit_test.py',
     'rtc_base',
     'sdk/android/src/jni',
     'sdk/objc',
@@ -110,47 +118,47 @@
 DEPS_RE = re.compile(r'\bdeps \+?= \[(?P<deps>.*?)\]',
                      re.MULTILINE | re.DOTALL)
-# FILE_PATH_RE matchies a file path.
+# FILE_PATH_RE matches a file path.
 FILE_PATH_RE = re.compile(r'"(?P<file_path>(\w|\/)+)(?P<extension>\.\w+)"')
 
 
 def FindSrcDirPath(starting_dir):
-    """Returns the abs path to the src/ dir of the project."""
-    src_dir = starting_dir
-    while os.path.basename(src_dir) != 'src':
-        src_dir = os.path.normpath(os.path.join(src_dir, os.pardir))
-    return src_dir
+  """Returns the abs path to the src/ dir of the project."""
+  src_dir = starting_dir
+  while os.path.basename(src_dir) != 'src':
+    src_dir = os.path.normpath(os.path.join(src_dir, os.pardir))
+  return src_dir
 
 
 @contextmanager
 def _AddToPath(*paths):
-    original_sys_path = sys.path
-    sys.path.extend(paths)
-    try:
-        yield
-    finally:
-        # Restore sys.path to what it was before.
-        sys.path = original_sys_path
+  original_sys_path = sys.path
+  sys.path.extend(paths)
+  try:
+    yield
+  finally:
+    # Restore sys.path to what it was before.
+    sys.path = original_sys_path
 
 
 def VerifyNativeApiHeadersListIsValid(input_api, output_api):
-    """Ensures the list of native API header directories is up to date."""
-    non_existing_paths = []
-    native_api_full_paths = [
-        input_api.os_path.join(input_api.PresubmitLocalPath(),
-                               *path.split('/')) for path in API_DIRS
+  """Ensures the list of native API header directories is up to date."""
+  non_existing_paths = []
+  native_api_full_paths = [
+      input_api.os_path.join(input_api.PresubmitLocalPath(), *path.split('/'))
+      for path in API_DIRS
+  ]
+  for path in native_api_full_paths:
+    if not os.path.isdir(path):
+      non_existing_paths.append(path)
+  if non_existing_paths:
+    return [
+        output_api.PresubmitError(
+            'Directories to native API headers have changed which has made '
+            'the list in PRESUBMIT.py outdated.\nPlease update it to the '
+            'current location of our native APIs.', non_existing_paths)
     ]
-    for path in native_api_full_paths:
-        if not os.path.isdir(path):
-            non_existing_paths.append(path)
-    if non_existing_paths:
-        return [
-            output_api.PresubmitError(
-                'Directories to native API headers have changed which has made '
-                'the list in PRESUBMIT.py outdated.\nPlease update it to the '
-                'current location of our native APIs.', non_existing_paths)
-        ]
-    return []
+  return []
 
 
 API_CHANGE_MSG = """
@@ -174,610 +182,592 @@ def VerifyNativeApiHeadersListIsValid(input_api, output_api):
 
 
 def CheckNativeApiHeaderChanges(input_api, output_api):
-    """Checks to remind proper changing of native APIs."""
-    files = []
-    source_file_filter = lambda x: input_api.FilterSourceFile(
-        x, files_to_check=[r'.+\.(gn|gni|h)$'])
-    for f in input_api.AffectedSourceFiles(source_file_filter):
-        for path in API_DIRS:
-            dn = os.path.dirname(f.LocalPath())
-            if path == 'api':
-                # Special case: Subdirectories included.
-                if dn == 'api' or dn.startswith('api/'):
-                    files.append(f.LocalPath())
-            else:
-                # Normal case: Subdirectories not included.
-                if dn == path:
-                    files.append(f.LocalPath())
-
-    if files:
-        return [output_api.PresubmitNotifyResult(API_CHANGE_MSG, files)]
-    return []
+  """Checks to remind proper changing of native APIs."""
+  files = []
+  source_file_filter = lambda x: input_api.FilterSourceFile(
+      x, files_to_check=[r'.+\.(gn|gni|h)$'])
+  for f in input_api.AffectedSourceFiles(source_file_filter):
+    for path in API_DIRS:
+      dn = os.path.dirname(f.LocalPath())
+      if path == 'api':
+        # Special case: Subdirectories included.
+        if dn == 'api' or dn.startswith('api/'):
+          files.append(f.LocalPath())
+      else:
+        # Normal case: Subdirectories not included.
+        if dn == path:
+          files.append(f.LocalPath())
+
+  if files:
+    return [output_api.PresubmitNotifyResult(API_CHANGE_MSG, files)]
+  return []
 
 
 def CheckNoIOStreamInHeaders(input_api, output_api, source_file_filter):
-    """Checks to make sure no .h files include <iostream>."""
-    files = []
-    pattern = input_api.re.compile(r'^#include\s*<iostream>',
-                                   input_api.re.MULTILINE)
-    file_filter = lambda x: (input_api.FilterSourceFile(x) and
-                             source_file_filter(x))
-    for f in input_api.AffectedSourceFiles(file_filter):
-        if not f.LocalPath().endswith('.h'):
-            continue
-        contents = input_api.ReadFile(f)
-        if pattern.search(contents):
-            files.append(f)
-
-    if len(files):
-        return [
-            output_api.PresubmitError(
-                'Do not #include <iostream> in header files, since it inserts '
-                'static initialization into every file including the header. '
-                'Instead, #include <ostream>. See http://crbug.com/94794',
-                files)
-        ]
-    return []
+  """Checks to make sure no .h files include <iostream>."""
+  files = []
+  pattern = input_api.re.compile(r'^#include\s*<iostream>',
+                                 input_api.re.MULTILINE)
+  file_filter = lambda x: (input_api.FilterSourceFile(x) and source_file_filter(
+      x))
+  for f in input_api.AffectedSourceFiles(file_filter):
+    if not f.LocalPath().endswith('.h'):
+      continue
+    contents = input_api.ReadFile(f)
+    if pattern.search(contents):
+      files.append(f)
+
+  if len(files) > 0:
+    return [
+        output_api.PresubmitError(
+            'Do not #include <iostream> in header files, since it inserts '
+            'static initialization into every file including the header. '
+            'Instead, #include <ostream>. See http://crbug.com/94794', files)
+    ]
+  return []
 
 
 def CheckNoPragmaOnce(input_api, output_api, source_file_filter):
-    """Make sure that banned functions are not used."""
-    files = []
-    pattern = input_api.re.compile(r'^#pragma\s+once', input_api.re.MULTILINE)
-    file_filter = lambda x: (input_api.FilterSourceFile(x) and
-                             source_file_filter(x))
-    for f in input_api.AffectedSourceFiles(file_filter):
-        if not f.LocalPath().endswith('.h'):
-            continue
-        contents = input_api.ReadFile(f)
-        if pattern.search(contents):
-            files.append(f)
-
-    if files:
-        return [
-            output_api.PresubmitError(
-                'Do not use #pragma once in header files.\n'
-                'See http://www.chromium.org/developers/coding-style'
-                '#TOC-File-headers',
-                files)
-        ]
-    return []
+  """Make sure that banned functions are not used."""
+  files = []
+  pattern = input_api.re.compile(r'^#pragma\s+once', input_api.re.MULTILINE)
+  file_filter = lambda x: (input_api.FilterSourceFile(x) and source_file_filter(
+      x))
+  for f in input_api.AffectedSourceFiles(file_filter):
+    if not f.LocalPath().endswith('.h'):
+      continue
+    contents = input_api.ReadFile(f)
+    if pattern.search(contents):
+      files.append(f)
+
+  if files:
+    return [
+        output_api.PresubmitError(
+            'Do not use #pragma once in header files.\n'
+            'See http://www.chromium.org/developers/coding-style'
+            '#TOC-File-headers', files)
+    ]
+  return []
+
 
 def CheckNoFRIEND_TEST(# pylint: disable=invalid-name
         input_api,
         output_api,
         source_file_filter):
-    """Make sure that gtest's FRIEND_TEST() macro is not used, the
+  """Make sure that gtest's FRIEND_TEST() macro is not used, the
    FRIEND_TEST_ALL_PREFIXES() macro from testsupport/gtest_prod_util.h should be
    used instead since that allows for FLAKY_, FAILS_ and DISABLED_ prefixes."""
-    problems = []
+  problems = []
 
-    file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
-                             source_file_filter(f))
-    for f in input_api.AffectedFiles(file_filter=file_filter):
-        for line_num, line in f.ChangedContents():
-            if 'FRIEND_TEST(' in line:
-                problems.append(' %s:%d' % (f.LocalPath(), line_num))
+  file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and
+                           source_file_filter(f))
+  for f in input_api.AffectedFiles(file_filter=file_filter):
+    for line_num, line in f.ChangedContents():
+      if 'FRIEND_TEST(' in line:
+        problems.append(' %s:%d' % (f.LocalPath(), line_num))
 
-    if not problems:
-        return []
-    return [
-        output_api.PresubmitPromptWarning(
-            'WebRTC\'s code should not use gtest\'s FRIEND_TEST() macro. 
' + 'Include testsupport/gtest_prod_util.h and use ' + 'FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems)) + ] def IsLintDisabled(disabled_paths, file_path): - """ Checks if a file is disabled for lint check.""" - for path in disabled_paths: - if file_path == path or os.path.dirname(file_path).startswith(path): - return True - return False + """ Checks if a file is disabled for lint check.""" + for path in disabled_paths: + if file_path == path or os.path.dirname(file_path).startswith(path): + return True + return False def CheckApprovedFilesLintClean(input_api, output_api, source_file_filter=None): - """Checks that all new or non-exempt .cc and .h files pass cpplint.py. + """Checks that all new or non-exempt .cc and .h files pass cpplint.py. This check is based on CheckChangeLintsClean in depot_tools/presubmit_canned_checks.py but has less filters and only checks added files.""" - result = [] - - # Initialize cpplint. - import cpplint - # Access to a protected member _XX of a client class - # pylint: disable=W0212 - cpplint._cpplint_state.ResetErrorCounts() - - lint_filters = cpplint._Filters() - lint_filters.extend(DISABLED_LINT_FILTERS) - cpplint._SetFilters(','.join(lint_filters)) - - # Create a platform independent exempt list for cpplint. - disabled_paths = [ - input_api.os_path.join(*path.split('/')) for path in CPPLINT_EXCEPTIONS - ] - - # Use the strictest verbosity level for cpplint.py (level 1) which is the - # default when running cpplint.py from command line. To make it possible to - # work with not-yet-converted code, we're only applying it to new (or - # moved/renamed) files and files not listed in CPPLINT_EXCEPTIONS. - verbosity_level = 1 - files = [] - for f in input_api.AffectedSourceFiles(source_file_filter): - # Note that moved/renamed files also count as added. - if f.Action() == 'A' or not IsLintDisabled(disabled_paths, - f.LocalPath()): - files.append(f.AbsoluteLocalPath()) - - for file_name in files: - cpplint.ProcessFile(file_name, verbosity_level) - - if cpplint._cpplint_state.error_count > 0: - if input_api.is_committing: - res_type = output_api.PresubmitError - else: - res_type = output_api.PresubmitPromptWarning - result = [res_type('Changelist failed cpplint.py check.')] + result = [] + + # Initialize cpplint. + import cpplint + # Access to a protected member _XX of a client class + # pylint: disable=W0212 + cpplint._cpplint_state.ResetErrorCounts() + + lint_filters = cpplint._Filters() + lint_filters.extend(DISABLED_LINT_FILTERS) + cpplint._SetFilters(','.join(lint_filters)) + + # Create a platform independent exempt list for cpplint. + disabled_paths = [ + input_api.os_path.join(*path.split('/')) for path in CPPLINT_EXCEPTIONS + ] + + # Use the strictest verbosity level for cpplint.py (level 1) which is the + # default when running cpplint.py from command line. To make it possible to + # work with not-yet-converted code, we're only applying it to new (or + # moved/renamed) files and files not listed in CPPLINT_EXCEPTIONS. + verbosity_level = 1 + files = [] + for f in input_api.AffectedSourceFiles(source_file_filter): + # Note that moved/renamed files also count as added. 
+ if f.Action() == 'A' or not IsLintDisabled(disabled_paths, f.LocalPath()): + files.append(f.AbsoluteLocalPath()) + + for file_name in files: + cpplint.ProcessFile(file_name, verbosity_level) + + if cpplint._cpplint_state.error_count > 0: + if input_api.is_committing: + res_type = output_api.PresubmitError + else: + res_type = output_api.PresubmitPromptWarning + result = [res_type('Changelist failed cpplint.py check.')] - return result + return result def CheckNoSourcesAbove(input_api, gn_files, output_api): - # Disallow referencing source files with paths above the GN file location. - source_pattern = input_api.re.compile(r' +sources \+?= \[(.*?)\]', - re.MULTILINE | re.DOTALL) - file_pattern = input_api.re.compile(r'"((\.\./.*?)|(//.*?))"') - violating_gn_files = set() - violating_source_entries = [] - for gn_file in gn_files: - contents = input_api.ReadFile(gn_file) - for source_block_match in source_pattern.finditer(contents): - # Find all source list entries starting with ../ in the source block - # (exclude overrides entries). - for file_list_match in file_pattern.finditer( - source_block_match.group(1)): - source_file = file_list_match.group(1) - if 'overrides/' not in source_file: - violating_source_entries.append(source_file) - violating_gn_files.add(gn_file) - if violating_gn_files: - return [ - output_api.PresubmitError( - 'Referencing source files above the directory of the GN file ' - 'is not allowed. Please introduce new GN targets in the proper ' - 'location instead.\n' - 'Invalid source entries:\n' - '%s\n' - 'Violating GN files:' % '\n'.join(violating_source_entries), - items=violating_gn_files) - ] - return [] + # Disallow referencing source files with paths above the GN file location. + source_pattern = input_api.re.compile(r' +sources \+?= \[(.*?)\]', + re.MULTILINE | re.DOTALL) + file_pattern = input_api.re.compile(r'"((\.\./.*?)|(//.*?))"') + violating_gn_files = set() + violating_source_entries = [] + for gn_file in gn_files: + contents = input_api.ReadFile(gn_file) + for source_block_match in source_pattern.finditer(contents): + # Find all source list entries starting with ../ in the source block + # (exclude overrides entries). + for file_list_match in file_pattern.finditer(source_block_match.group(1)): + source_file = file_list_match.group(1) + if 'overrides/' not in source_file: + violating_source_entries.append(source_file) + violating_gn_files.add(gn_file) + if violating_gn_files: + return [ + output_api.PresubmitError( + 'Referencing source files above the directory of the GN file ' + 'is not allowed. 
Please introduce new GN targets in the proper ' + 'location instead.\n' + 'Invalid source entries:\n' + '%s\n' + 'Violating GN files:' % '\n'.join(violating_source_entries), + items=violating_gn_files) + ] + return [] def CheckAbseilDependencies(input_api, gn_files, output_api): - """Checks that Abseil dependencies are declared in `absl_deps`.""" - absl_re = re.compile(r'third_party/abseil-cpp', re.MULTILINE | re.DOTALL) - target_types_to_check = [ - 'rtc_library', - 'rtc_source_set', - 'rtc_static_library', - 'webrtc_fuzzer_test', - ] - error_msg = ('Abseil dependencies in target "%s" (file: %s) ' - 'should be moved to the "absl_deps" parameter.') - errors = [] - - for gn_file in gn_files: - gn_file_content = input_api.ReadFile(gn_file) - for target_match in TARGET_RE.finditer(gn_file_content): - target_type = target_match.group('target_type') - target_name = target_match.group('target_name') - target_contents = target_match.group('target_contents') - if target_type in target_types_to_check: - for deps_match in DEPS_RE.finditer(target_contents): - deps = deps_match.group('deps').splitlines() - for dep in deps: - if re.search(absl_re, dep): - errors.append( - output_api.PresubmitError( - error_msg % - (target_name, gn_file.LocalPath()))) - break # no need to warn more than once per target - return errors + """Checks that Abseil dependencies are declared in `absl_deps`.""" + absl_re = re.compile(r'third_party/abseil-cpp', re.MULTILINE | re.DOTALL) + target_types_to_check = [ + 'rtc_library', + 'rtc_source_set', + 'rtc_static_library', + 'webrtc_fuzzer_test', + ] + error_msg = ('Abseil dependencies in target "%s" (file: %s) ' + 'should be moved to the "absl_deps" parameter.') + errors = [] + + # pylint: disable=too-many-nested-blocks + for gn_file in gn_files: + gn_file_content = input_api.ReadFile(gn_file) + for target_match in TARGET_RE.finditer(gn_file_content): + target_type = target_match.group('target_type') + target_name = target_match.group('target_name') + target_contents = target_match.group('target_contents') + if target_type in target_types_to_check: + for deps_match in DEPS_RE.finditer(target_contents): + deps = deps_match.group('deps').splitlines() + for dep in deps: + if re.search(absl_re, dep): + errors.append( + output_api.PresubmitError(error_msg % + (target_name, gn_file.LocalPath()))) + break # no need to warn more than once per target + return errors def CheckNoMixingSources(input_api, gn_files, output_api): - """Disallow mixing C, C++ and Obj-C/Obj-C++ in the same target. + """Disallow mixing C, C++ and Obj-C/Obj-C++ in the same target. See bugs.webrtc.org/7743 for more context. """ - def _MoreThanOneSourceUsed(*sources_lists): - sources_used = 0 - for source_list in sources_lists: - if len(source_list): - sources_used += 1 - return sources_used > 1 - - errors = defaultdict(lambda: []) - for gn_file in gn_files: - gn_file_content = input_api.ReadFile(gn_file) - for target_match in TARGET_RE.finditer(gn_file_content): - # list_of_sources is a list of tuples of the form - # (c_files, cc_files, objc_files) that keeps track of all the - # sources defined in a target. A GN target can have more that - # on definition of sources (since it supports if/else statements). - # E.g.: - # rtc_static_library("foo") { - # if (is_win) { - # sources = [ "foo.cc" ] - # } else { - # sources = [ "foo.mm" ] - # } - # } - # This is allowed and the presubmit check should support this case. 
- list_of_sources = [] - c_files = [] - cc_files = [] - objc_files = [] - target_name = target_match.group('target_name') - target_contents = target_match.group('target_contents') - for sources_match in SOURCES_RE.finditer(target_contents): - if '+=' not in sources_match.group(0): - if c_files or cc_files or objc_files: - list_of_sources.append((c_files, cc_files, objc_files)) - c_files = [] - cc_files = [] - objc_files = [] - for file_match in FILE_PATH_RE.finditer( - sources_match.group(1)): - file_path = file_match.group('file_path') - extension = file_match.group('extension') - if extension == '.c': - c_files.append(file_path + extension) - if extension == '.cc': - cc_files.append(file_path + extension) - if extension in ['.m', '.mm']: - objc_files.append(file_path + extension) + def _MoreThanOneSourceUsed(*sources_lists): + sources_used = 0 + for source_list in sources_lists: + if len(source_list) > 0: + sources_used += 1 + return sources_used > 1 + + errors = defaultdict(lambda: []) + for gn_file in gn_files: + gn_file_content = input_api.ReadFile(gn_file) + for target_match in TARGET_RE.finditer(gn_file_content): + # list_of_sources is a list of tuples of the form + # (c_files, cc_files, objc_files) that keeps track of all the + # sources defined in a target. A GN target can have more that + # on definition of sources (since it supports if/else statements). + # E.g.: + # rtc_static_library("foo") { + # if (is_win) { + # sources = [ "foo.cc" ] + # } else { + # sources = [ "foo.mm" ] + # } + # } + # This is allowed and the presubmit check should support this case. + list_of_sources = [] + c_files = [] + cc_files = [] + objc_files = [] + target_name = target_match.group('target_name') + target_contents = target_match.group('target_contents') + for sources_match in SOURCES_RE.finditer(target_contents): + if '+=' not in sources_match.group(0): + if c_files or cc_files or objc_files: list_of_sources.append((c_files, cc_files, objc_files)) - for c_files_list, cc_files_list, objc_files_list in list_of_sources: - if _MoreThanOneSourceUsed(c_files_list, cc_files_list, - objc_files_list): - all_sources = sorted(c_files_list + cc_files_list + - objc_files_list) - errors[gn_file.LocalPath()].append( - (target_name, all_sources)) - if errors: - return [ - output_api.PresubmitError( - 'GN targets cannot mix .c, .cc and .m (or .mm) source files.\n' - 'Please create a separate target for each collection of ' - 'sources.\n' - 'Mixed sources: \n' - '%s\n' - 'Violating GN files:\n%s\n' % - (json.dumps(errors, indent=2), '\n'.join(errors.keys()))) - ] - return [] + c_files = [] + cc_files = [] + objc_files = [] + for file_match in FILE_PATH_RE.finditer(sources_match.group(1)): + file_path = file_match.group('file_path') + extension = file_match.group('extension') + if extension == '.c': + c_files.append(file_path + extension) + if extension == '.cc': + cc_files.append(file_path + extension) + if extension in ['.m', '.mm']: + objc_files.append(file_path + extension) + list_of_sources.append((c_files, cc_files, objc_files)) + for c_files_list, cc_files_list, objc_files_list in list_of_sources: + if _MoreThanOneSourceUsed(c_files_list, cc_files_list, objc_files_list): + all_sources = sorted(c_files_list + cc_files_list + objc_files_list) + errors[gn_file.LocalPath()].append((target_name, all_sources)) + if errors: + return [ + output_api.PresubmitError( + 'GN targets cannot mix .c, .cc and .m (or .mm) source files.\n' + 'Please create a separate target for each collection of ' + 'sources.\n' + 'Mixed 
sources: \n' + '%s\n' + 'Violating GN files:\n%s\n' % + (json.dumps(errors, indent=2), '\n'.join(list(errors.keys())))) + ] + return [] def CheckNoPackageBoundaryViolations(input_api, gn_files, output_api): - cwd = input_api.PresubmitLocalPath() - with _AddToPath( - input_api.os_path.join(cwd, 'tools_webrtc', - 'presubmit_checks_lib')): - from check_package_boundaries import CheckPackageBoundaries - build_files = [ - os.path.join(cwd, gn_file.LocalPath()) for gn_file in gn_files + cwd = input_api.PresubmitLocalPath() + with _AddToPath( + input_api.os_path.join(cwd, 'tools_webrtc', 'presubmit_checks_lib')): + from check_package_boundaries import CheckPackageBoundaries + build_files = [os.path.join(cwd, gn_file.LocalPath()) for gn_file in gn_files] + errors = CheckPackageBoundaries(cwd, build_files)[:5] + if errors: + return [ + output_api.PresubmitError( + 'There are package boundary violations in the following GN ' + 'files:', + long_text='\n\n'.join(str(err) for err in errors)) ] - errors = CheckPackageBoundaries(cwd, build_files)[:5] - if errors: - return [ - output_api.PresubmitError( - 'There are package boundary violations in the following GN ' - 'files:', long_text='\n\n'.join(str(err) for err in errors)) - ] - return [] + return [] def _ReportFileAndLine(filename, line_num): - """Default error formatter for _FindNewViolationsOfRule.""" - return '%s (line %s)' % (filename, line_num) + """Default error formatter for _FindNewViolationsOfRule.""" + return '%s (line %s)' % (filename, line_num) def CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, output_api, error_formatter=_ReportFileAndLine): - """Ensure warning suppression flags are not added wihtout a reason.""" - msg = ('Usage of //build/config/clang:extra_warnings is discouraged ' - 'in WebRTC.\n' - 'If you are not adding this code (e.g. you are just moving ' - 'existing code) or you want to add an exception,\n' - 'you can add a comment on the line that causes the problem:\n\n' - '"-Wno-odr" # no-presubmit-check TODO(bugs.webrtc.org/BUG_ID)\n' - '\n' - 'Affected files:\n') - errors = [] # 2-element tuples with (file, line number) - clang_warn_re = input_api.re.compile( - r'//build/config/clang:extra_warnings') - no_presubmit_re = input_api.re.compile( - r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)') - for f in gn_files: - for line_num, line in f.ChangedContents(): - if clang_warn_re.search(line) and not no_presubmit_re.search(line): - errors.append(error_formatter(f.LocalPath(), line_num)) - if errors: - return [output_api.PresubmitError(msg, errors)] - return [] + """Ensure warning suppression flags are not added without a reason.""" + msg = ('Usage of //build/config/clang:extra_warnings is discouraged ' + 'in WebRTC.\n' + 'If you are not adding this code (e.g. 
you are just moving ' + 'existing code) or you want to add an exception,\n' + 'you can add a comment on the line that causes the problem:\n\n' + '"-Wno-odr" # no-presubmit-check TODO(bugs.webrtc.org/BUG_ID)\n' + '\n' + 'Affected files:\n') + errors = [] # 2-element tuples with (file, line number) + clang_warn_re = input_api.re.compile(r'//build/config/clang:extra_warnings') + # pylint: disable-next=fixme + no_presubmit_re = input_api.re.compile( + r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)') + for f in gn_files: + for line_num, line in f.ChangedContents(): + if clang_warn_re.search(line) and not no_presubmit_re.search(line): + errors.append(error_formatter(f.LocalPath(), line_num)) + if errors: + return [output_api.PresubmitError(msg, errors)] + return [] def CheckNoTestCaseUsageIsAdded(input_api, output_api, source_file_filter, error_formatter=_ReportFileAndLine): - error_msg = ('Usage of legacy GoogleTest API detected!\nPlease use the ' - 'new API: https://github.com/google/googletest/blob/master/' - 'googletest/docs/primer.md#beware-of-the-nomenclature.\n' - 'Affected files:\n') - errors = [] # 2-element tuples with (file, line number) - test_case_re = input_api.re.compile(r'TEST_CASE') - file_filter = lambda f: (source_file_filter(f) and f.LocalPath().endswith( - '.cc')) - for f in input_api.AffectedSourceFiles(file_filter): - for line_num, line in f.ChangedContents(): - if test_case_re.search(line): - errors.append(error_formatter(f.LocalPath(), line_num)) - if errors: - return [output_api.PresubmitError(error_msg, errors)] - return [] + error_msg = ('Usage of legacy GoogleTest API detected!\nPlease use the ' + 'new API: https://github.com/google/googletest/blob/master/' + 'googletest/docs/primer.md#beware-of-the-nomenclature.\n' + 'Affected files:\n') + errors = [] # 2-element tuples with (file, line number) + test_case_re = input_api.re.compile(r'TEST_CASE') + file_filter = lambda f: (source_file_filter(f) and f.LocalPath().endswith( + '.cc')) + for f in input_api.AffectedSourceFiles(file_filter): + for line_num, line in f.ChangedContents(): + if test_case_re.search(line): + errors.append(error_formatter(f.LocalPath(), line_num)) + if errors: + return [output_api.PresubmitError(error_msg, errors)] + return [] def CheckNoStreamUsageIsAdded(input_api, output_api, source_file_filter, error_formatter=_ReportFileAndLine): - """Make sure that no more dependencies on stringstream are added.""" - error_msg = ( - 'Usage of <istream>, <ostream> and <sstream> in WebRTC is ' - 'deprecated.\n' - 'This includes the following types:\n' - 'std::istringstream, std::ostringstream, std::wistringstream, ' - 'std::wostringstream,\n' - 'std::wstringstream, std::ostream, std::wostream, std::istream,' - 'std::wistream,\n' - 'std::iostream, std::wiostream.\n' - 'If you are not adding this code (e.g. 
you are just moving ' - 'existing code),\n' - 'you can add a comment on the line that causes the problem:\n\n' - '#include <ostream> // no-presubmit-check TODO(webrtc:8982)\n' - 'std::ostream& F() { // no-presubmit-check TODO(webrtc:8982)\n' - '\n' - 'If you are adding new code, consider using ' - 'rtc::SimpleStringBuilder\n' - '(in rtc_base/strings/string_builder.h).\n' - 'Affected files:\n') - errors = [] # 2-element tuples with (file, line number) - include_re = input_api.re.compile(r'#include <(i|o|s)stream>') - usage_re = input_api.re.compile( - r'std::(w|i|o|io|wi|wo|wio)(string)*stream') - no_presubmit_re = input_api.re.compile( - r'// no-presubmit-check TODO\(webrtc:8982\)') - file_filter = lambda x: (input_api.FilterSourceFile(x) and - source_file_filter(x)) - - def _IsException(file_path): - is_test = any( - file_path.endswith(x) for x in - ['_test.cc', '_tests.cc', '_unittest.cc', '_unittests.cc']) - return (file_path.startswith('examples') - or file_path.startswith('test') or is_test) - - for f in input_api.AffectedSourceFiles(file_filter): - # Usage of stringstream is allowed under examples/ and in tests. - if f.LocalPath() == 'PRESUBMIT.py' or _IsException(f.LocalPath()): - continue - for line_num, line in f.ChangedContents(): - if ((include_re.search(line) or usage_re.search(line)) - and not no_presubmit_re.search(line)): - errors.append(error_formatter(f.LocalPath(), line_num)) - if errors: - return [output_api.PresubmitError(error_msg, errors)] - return [] + """Make sure that no more dependencies on stringstream are added.""" + error_msg = ('Usage of <istream>, <ostream> and <sstream> in WebRTC is ' + 'deprecated.\n' + 'This includes the following types:\n' + 'std::istringstream, std::ostringstream, std::wistringstream, ' + 'std::wostringstream,\n' + 'std::wstringstream, std::ostream, std::wostream, std::istream,' + 'std::wistream,\n' + 'std::iostream, std::wiostream.\n' + 'If you are not adding this code (e.g. you are just moving ' + 'existing code),\n' + 'you can add a comment on the line that causes the problem:\n\n' + '#include <ostream> // no-presubmit-check TODO(webrtc:8982)\n' + 'std::ostream& F() { // no-presubmit-check TODO(webrtc:8982)\n' + '\n' + 'If you are adding new code, consider using ' + 'rtc::SimpleStringBuilder\n' + '(in rtc_base/strings/string_builder.h).\n' + 'Affected files:\n') + errors = [] # 2-element tuples with (file, line number) + include_re = input_api.re.compile(r'#include <(i|o|s)stream>') + usage_re = input_api.re.compile(r'std::(w|i|o|io|wi|wo|wio)(string)*stream') + no_presubmit_re = input_api.re.compile( + r'// no-presubmit-check TODO\(webrtc:8982\)') + file_filter = lambda x: (input_api.FilterSourceFile(x) and source_file_filter( + x)) + + def _IsException(file_path): + is_test = any( + file_path.endswith(x) + for x in ['_test.cc', '_tests.cc', '_unittest.cc', '_unittests.cc']) + return (file_path.startswith('examples') or file_path.startswith('test') + or is_test) + + for f in input_api.AffectedSourceFiles(file_filter): + # Usage of stringstream is allowed under examples/ and in tests. 
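# --- Illustrative aside (editor's sketch, not part of the patch): how the
# regexes above classify one changed line; the sample line is hypothetical.
import re

include_re = re.compile(r'#include <(i|o|s)stream>')
no_presubmit_re = re.compile(r'// no-presubmit-check TODO\(webrtc:8982\)')
line = '#include <sstream>  // no-presubmit-check TODO(webrtc:8982)'
# A line is flagged only when a stream include/usage appears without the
# waiver comment:
flagged = bool(include_re.search(line)) and not no_presubmit_re.search(line)
assert not flagged  # the waiver suppresses the presubmit error
# --- End of aside.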
+ if f.LocalPath() == 'PRESUBMIT.py' or _IsException(f.LocalPath()): + continue + for line_num, line in f.ChangedContents(): + if ((include_re.search(line) or usage_re.search(line)) + and not no_presubmit_re.search(line)): + errors.append(error_formatter(f.LocalPath(), line_num)) + if errors: + return [output_api.PresubmitError(error_msg, errors)] + return [] def CheckPublicDepsIsNotUsed(gn_files, input_api, output_api): - """Checks that public_deps is not used without a good reason.""" - result = [] - no_presubmit_check_re = input_api.re.compile( - r'# no-presubmit-check TODO\(webrtc:\d+\)') - error_msg = ('public_deps is not recommended in WebRTC BUILD.gn files ' - 'because it doesn\'t map well to downstream build systems.\n' - 'Used in: %s (line %d).\n' - 'If you are not adding this code (e.g. you are just moving ' - 'existing code) or you have a good reason, you can add this ' - 'comment (verbatim) on the line that causes the problem:\n\n' - 'public_deps = [ # no-presubmit-check TODO(webrtc:8603)\n') - for affected_file in gn_files: - for (line_number, affected_line) in affected_file.ChangedContents(): - if 'public_deps' in affected_line: - surpressed = no_presubmit_check_re.search(affected_line) - if not surpressed: - result.append( - output_api.PresubmitError( - error_msg % - (affected_file.LocalPath(), line_number))) - return result + """Checks that public_deps is not used without a good reason.""" + result = [] + no_presubmit_check_re = input_api.re.compile( + r'# no-presubmit-check TODO\(webrtc:\d+\)') + error_msg = ('public_deps is not recommended in WebRTC BUILD.gn files ' + 'because it doesn\'t map well to downstream build systems.\n' + 'Used in: %s (line %d).\n' + 'If you are not adding this code (e.g. you are just moving ' + 'existing code) or you have a good reason, you can add this ' + 'comment (verbatim) on the line that causes the problem:\n\n' + 'public_deps = [ # no-presubmit-check TODO(webrtc:8603)\n') + for affected_file in gn_files: + for (line_number, affected_line) in affected_file.ChangedContents(): + if 'public_deps' in affected_line: + suppressed = no_presubmit_check_re.search(affected_line) + if not suppressed: + result.append( + output_api.PresubmitError( + error_msg % (affected_file.LocalPath(), line_number))) + return result def CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api): - result = [] - error_msg = ( - 'check_includes overrides are not allowed since it can cause ' - 'incorrect dependencies to form. It effectively means that your ' - 'module can include any .h file without depending on its ' - 'corresponding target. There are some exceptional cases when ' - 'this is allowed: if so, get approval from a .gn owner in the ' - 'root OWNERS file.\n' - 'Used in: %s (line %d).') - no_presubmit_re = input_api.re.compile( - r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)') - for affected_file in gn_files: - for (line_number, affected_line) in affected_file.ChangedContents(): - if ('check_includes' in affected_line - and not no_presubmit_re.search(affected_line)): - result.append( - output_api.PresubmitError( - error_msg % (affected_file.LocalPath(), line_number))) - return result + result = [] + error_msg = ('check_includes overrides are not allowed since they can cause ' + 'incorrect dependencies to form. It effectively means that your ' + 'module can include any .h file without depending on its ' + 'corresponding target. 
There are some exceptional cases when ' + 'this is allowed: if so, get approval from a .gn owner in the ' + 'root OWNERS file.\n' + 'Used in: %s (line %d).') + # pylint: disable-next=fixme + no_presubmit_re = input_api.re.compile( + r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)') + for affected_file in gn_files: + for (line_number, affected_line) in affected_file.ChangedContents(): + if ('check_includes' in affected_line + and not no_presubmit_re.search(affected_line)): + result.append( + output_api.PresubmitError(error_msg % + (affected_file.LocalPath(), line_number))) + return result def CheckGnChanges(input_api, output_api): - file_filter = lambda x: (input_api.FilterSourceFile( - x, - files_to_check=(r'.+\.(gn|gni)$', ), - files_to_skip=(r'.*/presubmit_checks_lib/testdata/.*', ))) - - gn_files = [] - for f in input_api.AffectedSourceFiles(file_filter): - gn_files.append(f) - - result = [] - if gn_files: - result.extend(CheckNoSourcesAbove(input_api, gn_files, output_api)) - result.extend(CheckNoMixingSources(input_api, gn_files, output_api)) - result.extend(CheckAbseilDependencies(input_api, gn_files, output_api)) - result.extend( - CheckNoPackageBoundaryViolations(input_api, gn_files, output_api)) - result.extend(CheckPublicDepsIsNotUsed(gn_files, input_api, - output_api)) - result.extend( - CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api)) - result.extend( - CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, - output_api)) - return result + file_filter = lambda x: (input_api.FilterSourceFile( + x, + files_to_check=(r'.+\.(gn|gni)$', ), + files_to_skip=(r'.*/presubmit_checks_lib/testdata/.*', ))) + + gn_files = [] + for f in input_api.AffectedSourceFiles(file_filter): + gn_files.append(f) + + result = [] + if gn_files: + result.extend(CheckNoSourcesAbove(input_api, gn_files, output_api)) + result.extend(CheckNoMixingSources(input_api, gn_files, output_api)) + result.extend(CheckAbseilDependencies(input_api, gn_files, output_api)) + result.extend( + CheckNoPackageBoundaryViolations(input_api, gn_files, output_api)) + result.extend(CheckPublicDepsIsNotUsed(gn_files, input_api, output_api)) + result.extend(CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api)) + result.extend( + CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, output_api)) + return result def CheckGnGen(input_api, output_api): - """Runs `gn gen --check` with default args to detect mismatches between + """Runs `gn gen --check` with default args to detect mismatches between #includes and dependencies in the BUILD.gn files, as well as general build errors. """ - with _AddToPath( - input_api.os_path.join(input_api.PresubmitLocalPath(), - 'tools_webrtc', 'presubmit_checks_lib')): - from build_helpers import RunGnCheck - errors = RunGnCheck(FindSrcDirPath(input_api.PresubmitLocalPath()))[:5] - if errors: - return [ - output_api.PresubmitPromptWarning( - 'Some #includes do not match the build dependency graph. ' - 'Please run:\n' - ' gn gen --check <out_dir>', - long_text='\n\n'.join(errors)) - ] - return [] + with _AddToPath( + input_api.os_path.join(input_api.PresubmitLocalPath(), 'tools_webrtc', + 'presubmit_checks_lib')): + from build_helpers import RunGnCheck + errors = RunGnCheck(FindSrcDirPath(input_api.PresubmitLocalPath()))[:5] + if errors: + return [ + output_api.PresubmitPromptWarning( + 'Some #includes do not match the build dependency graph. 
' + 'Please run:\n' + ' gn gen --check <out_dir>', + long_text='\n\n'.join(errors)) + ] + return [] def CheckUnwantedDependencies(input_api, output_api, source_file_filter): - """Runs checkdeps on #include statements added in this + """Runs checkdeps on #include statements added in this change. Breaking - rules is an error, breaking ! rules is a warning. """ - # Copied from Chromium's src/PRESUBMIT.py. - - # We need to wait until we have an input_api object and use this - # roundabout construct to import checkdeps because this file is - # eval-ed and thus doesn't have __file__. - src_path = FindSrcDirPath(input_api.PresubmitLocalPath()) - checkdeps_path = input_api.os_path.join(src_path, 'buildtools', - 'checkdeps') - if not os.path.exists(checkdeps_path): - return [ - output_api.PresubmitError( - 'Cannot find checkdeps at %s\nHave you run "gclient sync" to ' - 'download all the DEPS entries?' % checkdeps_path) - ] - with _AddToPath(checkdeps_path): - import checkdeps - from cpp_checker import CppChecker - from rules import Rule - - added_includes = [] - for f in input_api.AffectedFiles(file_filter=source_file_filter): - if not CppChecker.IsCppFile(f.LocalPath()): - continue - - changed_lines = [line for _, line in f.ChangedContents()] - added_includes.append([f.LocalPath(), changed_lines]) - - deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath()) - - error_descriptions = [] - warning_descriptions = [] - for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes( - added_includes): - description_with_path = '%s\n %s' % (path, rule_description) - if rule_type == Rule.DISALLOW: - error_descriptions.append(description_with_path) - else: - warning_descriptions.append(description_with_path) - - results = [] - if error_descriptions: - results.append( - output_api.PresubmitError( - 'You added one or more #includes that violate checkdeps rules.' - '\nCheck that the DEPS files in these locations contain valid ' - 'rules.\nSee ' - 'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ ' - 'for more details about checkdeps.', error_descriptions)) - if warning_descriptions: - results.append( - output_api.PresubmitPromptOrNotify( - 'You added one or more #includes of files that are temporarily' - '\nallowed but being removed. Can you avoid introducing the\n' - '#include? See relevant DEPS file(s) for details and contacts.' - '\nSee ' - 'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ ' - 'for more details about checkdeps.', warning_descriptions)) - return results + # Copied from Chromium's src/PRESUBMIT.py. + + # We need to wait until we have an input_api object and use this + # roundabout construct to import checkdeps because this file is + # eval-ed and thus doesn't have __file__. + src_path = FindSrcDirPath(input_api.PresubmitLocalPath()) + checkdeps_path = input_api.os_path.join(src_path, 'buildtools', 'checkdeps') + if not os.path.exists(checkdeps_path): + return [ + output_api.PresubmitError( + 'Cannot find checkdeps at %s\nHave you run "gclient sync" to ' + 'download all the DEPS entries?' 
% checkdeps_path) + ] + with _AddToPath(checkdeps_path): + import checkdeps + from cpp_checker import CppChecker + from rules import Rule + + added_includes = [] + for f in input_api.AffectedFiles(file_filter=source_file_filter): + if not CppChecker.IsCppFile(f.LocalPath()): + continue + + changed_lines = [line for _, line in f.ChangedContents()] + added_includes.append([f.LocalPath(), changed_lines]) + + deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath()) + + error_descriptions = [] + warning_descriptions = [] + for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes( + added_includes): + description_with_path = '%s\n %s' % (path, rule_description) + if rule_type == Rule.DISALLOW: + error_descriptions.append(description_with_path) + else: + warning_descriptions.append(description_with_path) + + results = [] + if error_descriptions: + results.append( + output_api.PresubmitError( + 'You added one or more #includes that violate checkdeps rules.' + '\nCheck that the DEPS files in these locations contain valid ' + 'rules.\nSee ' + 'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ ' + 'for more details about checkdeps.', error_descriptions)) + if warning_descriptions: + results.append( + output_api.PresubmitPromptOrNotify( + 'You added one or more #includes of files that are temporarily' + '\nallowed but being removed. Can you avoid introducing the\n' + '#include? See relevant DEPS file(s) for details and contacts.' + '\nSee ' + 'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ ' + 'for more details about checkdeps.', warning_descriptions)) + return results def CheckCommitMessageBugEntry(input_api, output_api): - """Check that bug entries are well-formed in commit message.""" - bogus_bug_msg = ( - 'Bogus Bug entry: %s. Please specify the issue tracker prefix and the ' - 'issue number, separated by a colon, e.g. webrtc:123 or chromium:12345.' - ) - results = [] - for bug in input_api.change.BugsFromDescription(): - bug = bug.strip() - if bug.lower() == 'none': - continue - if 'b/' not in bug and ':' not in bug: - try: - if int(bug) > 100000: - # Rough indicator for current chromium bugs. - prefix_guess = 'chromium' - else: - prefix_guess = 'webrtc' - results.append( - 'Bug entry requires issue tracker prefix, e.g. %s:%s' % - (prefix_guess, bug)) - except ValueError: - results.append(bogus_bug_msg % bug) - elif not (re.match(r'\w+:\d+', bug) or re.match(r'b/\d+', bug)): - results.append(bogus_bug_msg % bug) - return [output_api.PresubmitError(r) for r in results] + """Check that bug entries are well-formed in commit message.""" + bogus_bug_msg = ( + 'Bogus Bug entry: %s. Please specify the issue tracker prefix and the ' + 'issue number, separated by a colon, e.g. webrtc:123 or chromium:12345.') + results = [] + for bug in input_api.change.BugsFromDescription(): + bug = bug.strip() + if bug.lower() == 'none': + continue + if 'b/' not in bug and ':' not in bug: + try: + if int(bug) > 100000: + # Rough indicator for current chromium bugs. + prefix_guess = 'chromium' + else: + prefix_guess = 'webrtc' + results.append('Bug entry requires issue tracker prefix, e.g. %s:%s' % + (prefix_guess, bug)) + except ValueError: + results.append(bogus_bug_msg % bug) + elif not (re.match(r'\w+:\d+', bug) or re.match(r'b/\d+', bug)): + results.append(bogus_bug_msg % bug) + return [output_api.PresubmitError(r) for r in results] def CheckChangeHasBugField(input_api, output_api): - """Requires that the changelist is associated with a bug. 
+ """Requires that the changelist is associated with a bug. This check is stricter than the one in depot_tools/presubmit_canned_checks.py since it fails the presubmit if the bug field is missing or doesn't contain @@ -786,271 +776,279 @@ def CheckChangeHasBugField(input_api, output_api): This supports both 'BUG=' and 'Bug:' since we are in the process of migrating to Gerrit and it encourages the usage of 'Bug:'. """ - if input_api.change.BugsFromDescription(): - return [] - else: - return [ - output_api.PresubmitError( - 'The "Bug: [bug number]" footer is mandatory. Please create a ' - 'bug and reference it using either of:\n' - ' * https://bugs.webrtc.org - reference it using Bug: ' - 'webrtc:XXXX\n' - ' * https://crbug.com - reference it using Bug: chromium:XXXXXX' - ) - ] + if input_api.change.BugsFromDescription(): + return [] + return [ + output_api.PresubmitError( + 'The "Bug: [bug number]" footer is mandatory. Please create a ' + 'bug and reference it using either of:\n' + ' * https://bugs.webrtc.org - reference it using Bug: ' + 'webrtc:XXXX\n' + ' * https://crbug.com - reference it using Bug: chromium:XXXXXX') + ] def CheckJSONParseErrors(input_api, output_api, source_file_filter): - """Check that JSON files do not contain syntax errors.""" - - def FilterFile(affected_file): - return (input_api.os_path.splitext( - affected_file.LocalPath())[1] == '.json' - and source_file_filter(affected_file)) - - def GetJSONParseError(input_api, filename): - try: - contents = input_api.ReadFile(filename) - input_api.json.loads(contents) - except ValueError as e: - return e - return None - - results = [] - for affected_file in input_api.AffectedFiles(file_filter=FilterFile, - include_deletes=False): - parse_error = GetJSONParseError(input_api, - affected_file.AbsoluteLocalPath()) - if parse_error: - results.append( - output_api.PresubmitError( - '%s could not be parsed: %s' % - (affected_file.LocalPath(), parse_error))) - return results + """Check that JSON files do not contain syntax errors.""" + def FilterFile(affected_file): + return (input_api.os_path.splitext(affected_file.LocalPath())[1] == '.json' + and source_file_filter(affected_file)) + + def GetJSONParseError(input_api, filename): + try: + contents = input_api.ReadFile(filename) + input_api.json.loads(contents) + except ValueError as e: + return e + return None + + results = [] + for affected_file in input_api.AffectedFiles(file_filter=FilterFile, + include_deletes=False): + parse_error = GetJSONParseError(input_api, + affected_file.AbsoluteLocalPath()) + if parse_error: + results.append( + output_api.PresubmitError('%s could not be parsed: %s' % + (affected_file.LocalPath(), parse_error))) + return results -def RunPythonTests(input_api, output_api): - def Join(*args): - return input_api.os_path.join(input_api.PresubmitLocalPath(), *args) - - test_directories = [ - input_api.PresubmitLocalPath(), - Join('rtc_tools', 'py_event_log_analyzer'), - Join('audio', 'test', 'unittests'), - ] + [ - root for root, _, files in os.walk(Join('tools_webrtc')) if any( - f.endswith('_test.py') for f in files) - ] - tests = [] - for directory in test_directories: - tests.extend( - input_api.canned_checks.GetUnitTestsInDirectory( - input_api, - output_api, - directory, - files_to_check=[r'.+_test\.py$'])) - return input_api.RunTests(tests, parallel=True) +def RunPythonTests(input_api, output_api): + def Join(*args): + return input_api.os_path.join(input_api.PresubmitLocalPath(), *args) + + test_directories = [ + input_api.PresubmitLocalPath(), + 
Join('rtc_tools', 'py_event_log_analyzer'), + Join('audio', 'test', 'unittests'), + ] + [ + root for root, _, files in os.walk(Join('tools_webrtc')) if any( + f.endswith('_test.py') for f in files) + ] + + tests = [] + skipped_tests = [ + # This test should be run manually after webrtc_dashboard_upload target + # has been built. + r'catapult_uploader_test\.py$' + ] + + for directory in test_directories: + tests.extend( + input_api.canned_checks.GetUnitTestsInDirectory( + input_api, + output_api, + directory, + files_to_check=[r'.+_test\.py$'], + files_to_skip=skipped_tests, + run_on_python2=False)) + return input_api.RunTests(tests, parallel=True) def CheckUsageOfGoogleProtobufNamespace(input_api, output_api, source_file_filter): - """Checks that the namespace google::protobuf has not been used.""" - files = [] - pattern = input_api.re.compile(r'google::protobuf') - proto_utils_path = os.path.join('rtc_base', 'protobuf_utils.h') - file_filter = lambda x: (input_api.FilterSourceFile(x) and - source_file_filter(x)) - for f in input_api.AffectedSourceFiles(file_filter): - if f.LocalPath() in [proto_utils_path, 'PRESUBMIT.py']: - continue - contents = input_api.ReadFile(f) - if pattern.search(contents): - files.append(f) - - if files: - return [ - output_api.PresubmitError( - 'Please avoid to use namespace `google::protobuf` directly.\n' - 'Add a using directive in `%s` and include that header instead.' - % proto_utils_path, files) - ] - return [] + """Checks that the namespace google::protobuf has not been used.""" + files = [] + pattern = input_api.re.compile(r'google::protobuf') + proto_utils_path = os.path.join('rtc_base', 'protobuf_utils.h') + file_filter = lambda x: (input_api.FilterSourceFile(x) and source_file_filter( + x)) + for f in input_api.AffectedSourceFiles(file_filter): + if f.LocalPath() in [proto_utils_path, 'PRESUBMIT.py']: + continue + contents = input_api.ReadFile(f) + if pattern.search(contents): + files.append(f) + + if files: + return [ + output_api.PresubmitError( + 'Please avoid using namespace `google::protobuf` directly.\n' + 'Add a using directive in `%s` and include that header instead.' % + proto_utils_path, files) + ] + return [] def _LicenseHeader(input_api): - """Returns the license header regexp.""" - # Accept any year number from 2003 to the current year - current_year = int(input_api.time.strftime('%Y')) - allowed_years = (str(s) for s in reversed(xrange(2003, current_year + 1))) - years_re = '(' + '|'.join(allowed_years) + ')' - license_header = ( - r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. ' - r'All [Rr]ights [Rr]eserved\.\n' - r'.*?\n' - r'.*? Use of this source code is governed by a BSD-style license\n' - r'.*? that can be found in the LICENSE file in the root of the source\n' - r'.*? tree\. An additional intellectual property rights grant can be ' - r'found\n' - r'.*? in the file PATENTS\. All contributing project authors may\n' - r'.*? be found in the AUTHORS file in the root of the source tree\.\n' - ) % { - 'year': years_re, - } - return license_header + """Returns the license header regexp.""" + # Accept any year number from 2003 to the current year + current_year = int(input_api.time.strftime('%Y')) + allowed_years = (str(s) for s in reversed(range(2003, current_year + 1))) + years_re = '(' + '|'.join(allowed_years) + ')' + license_header = ( + r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. ' + r'All [Rr]ights [Rr]eserved\.\n' + r'.*?\n' + r'.*? 
Use of this source code is governed by a BSD-style license\n' + r'.*? that can be found in the LICENSE file in the root of the source\n' + r'.*? tree\. An additional intellectual property rights grant can be ' + r'found\n' + r'.*? in the file PATENTS\. All contributing project authors may\n' + r'.*? be found in the AUTHORS file in the root of the source tree\.\n' + ) % { + 'year': years_re, + } + return license_header def CommonChecks(input_api, output_api): - """Checks common to both upload and commit.""" - results = [] - # Filter out files that are in objc or ios dirs from being cpplint-ed since - # they do not follow C++ lint rules. - exception_list = input_api.DEFAULT_FILES_TO_SKIP + ( - r".*\bobjc[\\\/].*", - r".*objc\.[hcm]+$", - ) - source_file_filter = lambda x: input_api.FilterSourceFile( - x, None, exception_list) - results.extend( - CheckApprovedFilesLintClean(input_api, output_api, source_file_filter)) - results.extend( - input_api.canned_checks.CheckLicense(input_api, output_api, - _LicenseHeader(input_api))) - - # TODO(bugs.webrtc.org/12114): Delete this filter and run pylint on - # all python files. This is a temporary solution. - python_file_filter = lambda f: (f.LocalPath().endswith('.py') and - source_file_filter(f)) - python_changed_files = [f.LocalPath() for f in input_api.AffectedFiles( - file_filter=python_file_filter)] - - results.extend( - input_api.canned_checks.RunPylint( - input_api, - output_api, - files_to_check=python_changed_files, - files_to_skip=( - r'^base[\\\/].*\.py$', - r'^build[\\\/].*\.py$', - r'^buildtools[\\\/].*\.py$', - r'^infra[\\\/].*\.py$', - r'^ios[\\\/].*\.py$', - r'^out.*[\\\/].*\.py$', - r'^testing[\\\/].*\.py$', - r'^third_party[\\\/].*\.py$', - r'^tools[\\\/].*\.py$', - # TODO(phoglund): should arguably be checked. - r'^tools_webrtc[\\\/]mb[\\\/].*\.py$', - r'^xcodebuild.*[\\\/].*\.py$', - ), - pylintrc='pylintrc')) - - # TODO(nisse): talk/ is no more, so make below checks simpler? - # WebRTC can't use the presubmit_canned_checks.PanProjectChecks function - # since we need to have different license checks - # in talk/ and webrtc/directories. - # Instead, hand-picked checks are included below. - - # .m and .mm files are ObjC files. For simplicity we will consider - # .h files in ObjC subdirectories ObjC headers. - objc_filter_list = (r'.+\.m$', r'.+\.mm$', r'.+objc\/.+\.h$') - # Skip long-lines check for DEPS and GN files. - build_file_filter_list = (r'.+\.gn$', r'.+\.gni$', 'DEPS') - # Also we will skip most checks for third_party directory. 
- third_party_filter_list = (r'^third_party[\\\/].+', ) - eighty_char_sources = lambda x: input_api.FilterSourceFile( - x, - files_to_skip=build_file_filter_list + objc_filter_list + - third_party_filter_list) - hundred_char_sources = lambda x: input_api.FilterSourceFile( - x, files_to_check=objc_filter_list) - non_third_party_sources = lambda x: input_api.FilterSourceFile( - x, files_to_skip=third_party_filter_list) - - results.extend( - input_api.canned_checks.CheckLongLines( - input_api, - output_api, - maxlen=80, - source_file_filter=eighty_char_sources)) - results.extend( - input_api.canned_checks.CheckLongLines( - input_api, - output_api, - maxlen=100, - source_file_filter=hundred_char_sources)) - results.extend( - input_api.canned_checks.CheckChangeHasNoTabs( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend( - input_api.canned_checks.CheckChangeHasNoStrayWhitespace( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend( - input_api.canned_checks.CheckAuthorizedAuthor( - input_api, - output_api, - bot_allowlist=[ - 'chromium-webrtc-autoroll@webrtc-ci.iam.gserviceaccount.com', - 'webrtc-version-updater@webrtc-ci.iam.gserviceaccount.com', - ])) - results.extend( - input_api.canned_checks.CheckChangeTodoHasOwner( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend( - input_api.canned_checks.CheckPatchFormatted(input_api, output_api)) - results.extend(CheckNativeApiHeaderChanges(input_api, output_api)) - results.extend( - CheckNoIOStreamInHeaders(input_api, - output_api, - source_file_filter=non_third_party_sources)) - results.extend( - CheckNoPragmaOnce(input_api, - output_api, - source_file_filter=non_third_party_sources)) - results.extend( - CheckNoFRIEND_TEST(input_api, - output_api, - source_file_filter=non_third_party_sources)) - results.extend(CheckGnChanges(input_api, output_api)) - results.extend( - CheckUnwantedDependencies(input_api, - output_api, - source_file_filter=non_third_party_sources)) - results.extend( - CheckJSONParseErrors(input_api, - output_api, - source_file_filter=non_third_party_sources)) - results.extend(RunPythonTests(input_api, output_api)) - results.extend( - CheckUsageOfGoogleProtobufNamespace( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend( - CheckOrphanHeaders(input_api, + """Checks common to both upload and commit.""" + results = [] + # Filter out files that are in objc or ios dirs from being cpplint-ed since + # they do not follow C++ lint rules. + exception_list = input_api.DEFAULT_FILES_TO_SKIP + ( + r".*\bobjc[\\\/].*", + r".*objc\.[hcm]+$", + ) + source_file_filter = lambda x: input_api.FilterSourceFile( + x, None, exception_list) + results.extend( + CheckApprovedFilesLintClean(input_api, output_api, source_file_filter)) + results.extend( + input_api.canned_checks.CheckLicense(input_api, output_api, + _LicenseHeader(input_api))) + + # TODO(bugs.webrtc.org/12114): Delete this filter and run pylint on + # all python files. This is a temporary solution. 
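# --- Illustrative aside (editor's sketch, not part of the patch): which
# changed files reach RunPylint under the filter built below. File names
# are hypothetical.
changed = ['tools_webrtc/foo.py', 'sdk/objc/bar.py', 'pc/baz.cc']
py_files = [p for p in changed if p.endswith('.py')]
# 'pc/baz.cc' is dropped here; 'sdk/objc/bar.py' would additionally be
# dropped by the objc entries in exception_list above.
# --- End of aside.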
+ python_file_filter = lambda f: (f.LocalPath().endswith('.py') and + source_file_filter(f)) + python_changed_files = [ + f.LocalPath() + for f in input_api.AffectedFiles(include_deletes=False, + file_filter=python_file_filter) + ] + + results.extend( + input_api.canned_checks.RunPylint( + input_api, + output_api, + files_to_check=python_changed_files, + files_to_skip=( + r'^base[\\\/].*\.py$', + r'^build[\\\/].*\.py$', + r'^buildtools[\\\/].*\.py$', + r'^infra[\\\/].*\.py$', + r'^ios[\\\/].*\.py$', + r'^out.*[\\\/].*\.py$', + r'^testing[\\\/].*\.py$', + r'^third_party[\\\/].*\.py$', + r'^tools[\\\/].*\.py$', + # TODO(bugs.webrtc.org/13605): should arguably be checked. + r'^tools_webrtc[\\\/]mb[\\\/].*\.py$', + r'^xcodebuild.*[\\\/].*\.py$', + ), + pylintrc='pylintrc', + version='2.7')) + + # TODO(bugs.webrtc.org/13606): talk/ is no more, so make below checks simpler? + # WebRTC can't use the presubmit_canned_checks.PanProjectChecks function + # since we need to have different license checks + # in talk/ and webrtc/directories. + # Instead, hand-picked checks are included below. + + # .m and .mm files are ObjC files. For simplicity we will consider + # .h files in ObjC subdirectories ObjC headers. + objc_filter_list = (r'.+\.m$', r'.+\.mm$', r'.+objc\/.+\.h$') + # Skip long-lines check for DEPS and GN files. + build_file_filter_list = (r'.+\.gn$', r'.+\.gni$', 'DEPS') + # Also we will skip most checks for third_party directory. + third_party_filter_list = (r'^third_party[\\\/].+', ) + eighty_char_sources = lambda x: input_api.FilterSourceFile( + x, + files_to_skip=build_file_filter_list + objc_filter_list + + third_party_filter_list) + hundred_char_sources = lambda x: input_api.FilterSourceFile( + x, files_to_check=objc_filter_list) + non_third_party_sources = lambda x: input_api.FilterSourceFile( + x, files_to_skip=third_party_filter_list) + + results.extend( + input_api.canned_checks.CheckLongLines( + input_api, + output_api, + maxlen=80, + source_file_filter=eighty_char_sources)) + results.extend( + input_api.canned_checks.CheckLongLines( + input_api, + output_api, + maxlen=100, + source_file_filter=hundred_char_sources)) + results.extend( + input_api.canned_checks.CheckChangeHasNoTabs( + input_api, output_api, source_file_filter=non_third_party_sources)) + results.extend( + input_api.canned_checks.CheckChangeHasNoStrayWhitespace( + input_api, output_api, source_file_filter=non_third_party_sources)) + results.extend( + input_api.canned_checks.CheckAuthorizedAuthor( + input_api, + output_api, + bot_allowlist=[ + 'chromium-webrtc-autoroll@webrtc-ci.iam.gserviceaccount.com', + 'webrtc-version-updater@webrtc-ci.iam.gserviceaccount.com', + ])) + results.extend( + input_api.canned_checks.CheckChangeTodoHasOwner( + input_api, output_api, source_file_filter=non_third_party_sources)) + results.extend( + input_api.canned_checks.CheckPatchFormatted(input_api, output_api)) + results.extend(CheckNativeApiHeaderChanges(input_api, output_api)) + results.extend( + CheckNoIOStreamInHeaders(input_api, + output_api, + source_file_filter=non_third_party_sources)) + results.extend( + CheckNoPragmaOnce(input_api, + output_api, + source_file_filter=non_third_party_sources)) + results.extend( + CheckNoFRIEND_TEST(input_api, + output_api, + source_file_filter=non_third_party_sources)) + results.extend(CheckGnChanges(input_api, output_api)) + results.extend( + CheckUnwantedDependencies(input_api, + output_api, + source_file_filter=non_third_party_sources)) + results.extend( + CheckJSONParseErrors(input_api, 
output_api, source_file_filter=non_third_party_sources)) - results.extend( - CheckNewlineAtTheEndOfProtoFiles( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend( - CheckNoStreamUsageIsAdded(input_api, output_api, + results.extend(RunPythonTests(input_api, output_api)) + results.extend( + CheckUsageOfGoogleProtobufNamespace( + input_api, output_api, source_file_filter=non_third_party_sources)) + results.extend( + CheckOrphanHeaders(input_api, + output_api, + source_file_filter=non_third_party_sources)) + results.extend( + CheckNewlineAtTheEndOfProtoFiles( + input_api, output_api, source_file_filter=non_third_party_sources)) + results.extend( + CheckNoStreamUsageIsAdded(input_api, output_api, non_third_party_sources)) + results.extend( + CheckNoTestCaseUsageIsAdded(input_api, output_api, non_third_party_sources)) - results.extend( - CheckNoTestCaseUsageIsAdded(input_api, output_api, - non_third_party_sources)) - results.extend(CheckAddedDepsHaveTargetApprovals(input_api, output_api)) - results.extend(CheckApiDepsFileIsUpToDate(input_api, output_api)) - results.extend( - CheckAbslMemoryInclude(input_api, output_api, non_third_party_sources)) - results.extend( - CheckBannedAbslMakeUnique(input_api, output_api, - non_third_party_sources)) - results.extend( - CheckObjcApiSymbols(input_api, output_api, non_third_party_sources)) - return results + results.extend(CheckAddedDepsHaveTargetApprovals(input_api, output_api)) + results.extend(CheckApiDepsFileIsUpToDate(input_api, output_api)) + results.extend( + CheckAbslMemoryInclude(input_api, output_api, non_third_party_sources)) + results.extend( + CheckAssertUsage(input_api, output_api, non_third_party_sources)) + results.extend( + CheckBannedAbslMakeUnique(input_api, output_api, non_third_party_sources)) + results.extend( + CheckObjcApiSymbols(input_api, output_api, non_third_party_sources)) + return results def CheckApiDepsFileIsUpToDate(input_api, output_api): - """Check that 'include_rules' in api/DEPS is up to date. + """Check that 'include_rules' in api/DEPS is up to date. The file api/DEPS must be kept up to date in order to avoid including internal headers from WebRTC's api/ headers. @@ -1059,366 +1057,378 @@ def CheckApiDepsFileIsUpToDate(input_api, output_api): rule for each root level directory. More focused allow rules can be added to 'specific_include_rules'. """ - results = [] - api_deps = os.path.join(input_api.PresubmitLocalPath(), 'api', 'DEPS') - with open(api_deps) as f: - deps_content = _ParseDeps(f.read()) - - include_rules = deps_content.get('include_rules', []) - dirs_to_skip = set(['api', 'docs']) - - # Only check top level directories affected by the current CL. 
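# --- Illustrative aside (editor's sketch, not part of the patch): the rule
# derivation in miniature; paths are hypothetical and the real check also
# verifies that each directory exists on disk.
affected = ['pc/peer_connection.cc', 'video/x.cc', 'api/foo.h']
dirs_to_skip = {'api', 'docs'}
dirs_to_check = {p.split('/')[0] for p in affected
                 if '/' in p and p.split('/')[0] not in dirs_to_skip}
# Every such directory must appear in api/DEPS as a '-<dir>' rule:
assert {'-%s' % d for d in dirs_to_check} == {'-pc', '-video'}
# --- End of aside.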
- dirs_to_check = set() - for f in input_api.AffectedFiles(): - path_tokens = [t for t in f.LocalPath().split(os.sep) if t] - if len(path_tokens) > 1: - if (path_tokens[0] not in dirs_to_skip and os.path.isdir( - os.path.join(input_api.PresubmitLocalPath(), - path_tokens[0]))): - dirs_to_check.add(path_tokens[0]) - - missing_include_rules = set() - for p in dirs_to_check: - rule = '-%s' % p - if rule not in include_rules: - missing_include_rules.add(rule) - - if missing_include_rules: - error_msg = [ - 'include_rules = [\n', - ' ...\n', - ] - - for r in sorted(missing_include_rules): - error_msg.append(' "%s",\n' % str(r)) - - error_msg.append(' ...\n') - error_msg.append(']\n') + results = [] + api_deps = os.path.join(input_api.PresubmitLocalPath(), 'api', 'DEPS') + with open(api_deps) as f: + deps_content = _ParseDeps(f.read()) + + include_rules = deps_content.get('include_rules', []) + dirs_to_skip = set(['api', 'docs']) + + # Only check top level directories affected by the current CL. + dirs_to_check = set() + for f in input_api.AffectedFiles(): + path_tokens = [t for t in f.LocalPath().split(os.sep) if t] + if len(path_tokens) > 1: + if (path_tokens[0] not in dirs_to_skip and os.path.isdir( + os.path.join(input_api.PresubmitLocalPath(), path_tokens[0]))): + dirs_to_check.add(path_tokens[0]) + + missing_include_rules = set() + for p in dirs_to_check: + rule = '-%s' % p + if rule not in include_rules: + missing_include_rules.add(rule) + + if missing_include_rules: + error_msg = [ + 'include_rules = [\n', + ' ...\n', + ] - results.append( - output_api.PresubmitError( - 'New root level directory detected! WebRTC api/ headers should ' - 'not #include headers from \n' - 'the new directory, so please update "include_rules" in file\n' - '"%s". Example:\n%s\n' % (api_deps, ''.join(error_msg)))) + for r in sorted(missing_include_rules): + error_msg.append(' "%s",\n' % str(r)) - return results + error_msg.append(' ...\n') + error_msg.append(']\n') + + results.append( + output_api.PresubmitError( + 'New root level directory detected! WebRTC api/ headers should ' + 'not #include headers from \n' + 'the new directory, so please update "include_rules" in file\n' + '"%s". 
Example:\n%s\n' % (api_deps, ''.join(error_msg)))) + + return results def CheckBannedAbslMakeUnique(input_api, output_api, source_file_filter): - file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and - source_file_filter(f)) - - files = [] - for f in input_api.AffectedFiles(include_deletes=False, - file_filter=file_filter): - for _, line in f.ChangedContents(): - if 'absl::make_unique' in line: - files.append(f) - break - - if len(files): - return [ - output_api.PresubmitError( - 'Please use std::make_unique instead of absl::make_unique.\n' - 'Affected files:', files) - ] - return [] + file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and + source_file_filter(f)) + + files = [] + for f in input_api.AffectedFiles(include_deletes=False, + file_filter=file_filter): + for _, line in f.ChangedContents(): + if 'absl::make_unique' in line: + files.append(f) + break + + if files: + return [ + output_api.PresubmitError( + 'Please use std::make_unique instead of absl::make_unique.\n' + 'Affected files:', files) + ] + return [] def CheckObjcApiSymbols(input_api, output_api, source_file_filter): - rtc_objc_export = re.compile(r'RTC_OBJC_EXPORT(.|\n){26}', - re.MULTILINE | re.DOTALL) - file_filter = lambda f: (f.LocalPath().endswith(('.h')) and - source_file_filter(f)) - - files = [] - file_filter = lambda x: (input_api.FilterSourceFile(x) and - source_file_filter(x)) - for f in input_api.AffectedSourceFiles(file_filter): - if not f.LocalPath().endswith('.h') or not 'sdk/objc' in f.LocalPath(): - continue - if f.LocalPath().endswith('sdk/objc/base/RTCMacros.h'): - continue - contents = input_api.ReadFile(f) - for match in rtc_objc_export.finditer(contents): - export_block = match.group(0) - if 'RTC_OBJC_TYPE' not in export_block: - files.append(f.LocalPath()) - - if len(files): - return [ - output_api.PresubmitError( - 'RTC_OBJC_EXPORT types must be wrapped into an RTC_OBJC_TYPE() ' - + 'macro.\n\n' + 'For example:\n' + - 'RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE(RtcFoo)\n\n' + - 'RTC_OBJC_EXPORT @interface RTC_OBJC_TYPE(RtcFoo)\n\n' + - 'Please fix the following files:', files) - ] - return [] + rtc_objc_export = re.compile(r'RTC_OBJC_EXPORT(.|\n){26}', + re.MULTILINE | re.DOTALL) + file_filter = lambda f: (f.LocalPath().endswith(('.h')) and + source_file_filter(f)) + + files = [] + file_filter = lambda x: (input_api.FilterSourceFile(x) and source_file_filter( + x)) + for f in input_api.AffectedSourceFiles(file_filter): + if not f.LocalPath().endswith('.h') or not 'sdk/objc' in f.LocalPath(): + continue + if f.LocalPath().endswith('sdk/objc/base/RTCMacros.h'): + continue + contents = input_api.ReadFile(f) + for match in rtc_objc_export.finditer(contents): + export_block = match.group(0) + if 'RTC_OBJC_TYPE' not in export_block: + files.append(f.LocalPath()) + + if len(files) > 0: + return [ + output_api.PresubmitError( + 'RTC_OBJC_EXPORT types must be wrapped into an RTC_OBJC_TYPE() ' + + 'macro.\n\n' + 'For example:\n' + + 'RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE(RtcFoo)\n\n' + + 'RTC_OBJC_EXPORT @interface RTC_OBJC_TYPE(RtcFoo)\n\n' + + 'Please fix the following files:', files) + ] + return [] + + +def CheckAssertUsage(input_api, output_api, source_file_filter): + pattern = input_api.re.compile(r'\bassert\(') + file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h', '.m', '.mm')) + and source_file_filter(f)) + + files = [] + for f in input_api.AffectedFiles(include_deletes=False, + file_filter=file_filter): + for _, line in f.ChangedContents(): + if 
pattern.search(line): + files.append(f.LocalPath()) + break + + if len(files) > 0: + return [ + output_api.PresubmitError( + 'Usage of assert() has been detected in the following files, ' + 'please use RTC_DCHECK() instead.\n Files:', files) + ] + return [] def CheckAbslMemoryInclude(input_api, output_api, source_file_filter): - pattern = input_api.re.compile(r'^#include\s*"absl/memory/memory.h"', - input_api.re.MULTILINE) - file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and - source_file_filter(f)) - - files = [] - for f in input_api.AffectedFiles(include_deletes=False, - file_filter=file_filter): - contents = input_api.ReadFile(f) - if pattern.search(contents): - continue - for _, line in f.ChangedContents(): - if 'absl::WrapUnique' in line: - files.append(f) - break - - if len(files): - return [ - output_api.PresubmitError( - 'Please include "absl/memory/memory.h" header for ' - 'absl::WrapUnique.\nThis header may or may not be included ' - 'transitively depending on the C++ standard version.', files) - ] - return [] + pattern = input_api.re.compile(r'^#include\s*"absl/memory/memory.h"', + input_api.re.MULTILINE) + file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and + source_file_filter(f)) + + files = [] + for f in input_api.AffectedFiles(include_deletes=False, + file_filter=file_filter): + contents = input_api.ReadFile(f) + if pattern.search(contents): + continue + for _, line in f.ChangedContents(): + if 'absl::WrapUnique' in line: + files.append(f) + break + + if len(files) > 0: + return [ + output_api.PresubmitError( + 'Please include "absl/memory/memory.h" header for ' + 'absl::WrapUnique.\nThis header may or may not be included ' + 'transitively depending on the C++ standard version.', files) + ] + return [] def CheckChangeOnUpload(input_api, output_api): - results = [] - results.extend(CommonChecks(input_api, output_api)) - results.extend(CheckGnGen(input_api, output_api)) - results.extend( - input_api.canned_checks.CheckGNFormatted(input_api, output_api)) - return results + results = [] + results.extend(CommonChecks(input_api, output_api)) + results.extend(CheckGnGen(input_api, output_api)) + results.extend(input_api.canned_checks.CheckGNFormatted( + input_api, output_api)) + return results def CheckChangeOnCommit(input_api, output_api): - results = [] - results.extend(CommonChecks(input_api, output_api)) - results.extend(VerifyNativeApiHeadersListIsValid(input_api, output_api)) - results.extend(input_api.canned_checks.CheckOwners(input_api, output_api)) - results.extend( - input_api.canned_checks.CheckChangeWasUploaded(input_api, output_api)) - results.extend( - input_api.canned_checks.CheckChangeHasDescription( - input_api, output_api)) - results.extend(CheckChangeHasBugField(input_api, output_api)) - results.extend(CheckCommitMessageBugEntry(input_api, output_api)) - results.extend( - input_api.canned_checks.CheckTreeIsOpen( - input_api, - output_api, - json_url='http://webrtc-status.appspot.com/current?format=json')) - return results + results = [] + results.extend(CommonChecks(input_api, output_api)) + results.extend(VerifyNativeApiHeadersListIsValid(input_api, output_api)) + results.extend(input_api.canned_checks.CheckOwners(input_api, output_api)) + results.extend( + input_api.canned_checks.CheckChangeWasUploaded(input_api, output_api)) + results.extend( + input_api.canned_checks.CheckChangeHasDescription(input_api, output_api)) + results.extend(CheckChangeHasBugField(input_api, output_api)) + 
results.extend(CheckCommitMessageBugEntry(input_api, output_api)) + results.extend( + input_api.canned_checks.CheckTreeIsOpen( + input_api, + output_api, + json_url='http://webrtc-status.appspot.com/current?format=json')) + return results def CheckOrphanHeaders(input_api, output_api, source_file_filter): - # We need to wait until we have an input_api object and use this - # roundabout construct to import prebubmit_checks_lib because this file is - # eval-ed and thus doesn't have __file__. - error_msg = """{} should be listed in {}.""" - results = [] - exempt_paths = [ - os.path.join('tools_webrtc', 'ios', 'SDK'), - ] - with _AddToPath( - input_api.os_path.join(input_api.PresubmitLocalPath(), - 'tools_webrtc', 'presubmit_checks_lib')): - from check_orphan_headers import GetBuildGnPathFromFilePath - from check_orphan_headers import IsHeaderInBuildGn - - file_filter = lambda x: input_api.FilterSourceFile( - x, files_to_skip=exempt_paths) and source_file_filter(x) - for f in input_api.AffectedSourceFiles(file_filter): - if f.LocalPath().endswith('.h'): - file_path = os.path.abspath(f.LocalPath()) - root_dir = os.getcwd() - gn_file_path = GetBuildGnPathFromFilePath(file_path, - os.path.exists, root_dir) - in_build_gn = IsHeaderInBuildGn(file_path, gn_file_path) - if not in_build_gn: - results.append( - output_api.PresubmitError( - error_msg.format(f.LocalPath(), - os.path.relpath(gn_file_path)))) - return results + # We need to wait until we have an input_api object and use this + # roundabout construct to import presubmit_checks_lib because this file is + # eval-ed and thus doesn't have __file__. + error_msg = """{} should be listed in {}.""" + results = [] + exempt_paths = [re.escape(os.path.join('tools_webrtc', 'ios', 'SDK'))] + + with _AddToPath( + input_api.os_path.join(input_api.PresubmitLocalPath(), 'tools_webrtc', + 'presubmit_checks_lib')): + from check_orphan_headers import GetBuildGnPathFromFilePath + from check_orphan_headers import IsHeaderInBuildGn + + file_filter = lambda x: input_api.FilterSourceFile( + x, files_to_skip=exempt_paths) and source_file_filter(x) + for f in input_api.AffectedSourceFiles(file_filter): + if f.LocalPath().endswith('.h'): + file_path = os.path.abspath(f.LocalPath()) + root_dir = os.getcwd() + gn_file_path = GetBuildGnPathFromFilePath(file_path, os.path.exists, + root_dir) + in_build_gn = IsHeaderInBuildGn(file_path, gn_file_path) + if not in_build_gn: + results.append( + output_api.PresubmitError( + error_msg.format(f.LocalPath(), os.path.relpath(gn_file_path)))) + return results def CheckNewlineAtTheEndOfProtoFiles(input_api, output_api, source_file_filter): - """Checks that all .proto files are terminated with a newline.""" - error_msg = 'File {} must end with exactly one newline.' - results = [] - file_filter = lambda x: input_api.FilterSourceFile( - x, files_to_check=(r'.+\.proto$', )) and source_file_filter(x) - for f in input_api.AffectedSourceFiles(file_filter): - file_path = f.LocalPath() - with open(file_path) as f: - lines = f.readlines() - if len(lines) > 0 and not lines[-1].endswith('\n'): - results.append( - output_api.PresubmitError(error_msg.format(file_path))) - return results + """Checks that all .proto files are terminated with a newline.""" + error_msg = 'File {} must end with exactly one newline.' 
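# --- Illustrative aside (editor's sketch, not part of the patch): the
# condition the loop below applies to each affected .proto file, shown on an
# in-memory sample instead of a real file.
lines = 'syntax = "proto2";\nmessage Foo {}\n'.splitlines(keepends=True)
ends_with_newline = len(lines) > 0 and lines[-1].endswith('\n')
assert ends_with_newline  # a file failing this would be reported
# --- End of aside.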
+ results = [] + file_filter = lambda x: input_api.FilterSourceFile( + x, files_to_check=(r'.+\.proto$', )) and source_file_filter(x) + for f in input_api.AffectedSourceFiles(file_filter): + file_path = f.LocalPath() + with open(file_path) as f: + lines = f.readlines() + if len(lines) > 0 and not lines[-1].endswith('\n'): + results.append(output_api.PresubmitError(error_msg.format(file_path))) + return results def _ExtractAddRulesFromParsedDeps(parsed_deps): - """Extract the rules that add dependencies from a parsed DEPS file. + """Extract the rules that add dependencies from a parsed DEPS file. Args: parsed_deps: the locals dictionary from evaluating the DEPS file.""" - add_rules = set() + add_rules = set() + add_rules.update([ + rule[1:] for rule in parsed_deps.get('include_rules', []) + if rule.startswith('+') or rule.startswith('!') + ]) + for _, rules in parsed_deps.get('specific_include_rules', {}).items(): add_rules.update([ - rule[1:] for rule in parsed_deps.get('include_rules', []) + rule[1:] for rule in rules if rule.startswith('+') or rule.startswith('!') ]) - for _, rules in parsed_deps.get('specific_include_rules', {}).iteritems(): - add_rules.update([ - rule[1:] for rule in rules - if rule.startswith('+') or rule.startswith('!') - ]) - return add_rules + return add_rules def _ParseDeps(contents): - """Simple helper for parsing DEPS files.""" + """Simple helper for parsing DEPS files.""" - # Stubs for handling special syntax in the root DEPS file. - class VarImpl(object): - def __init__(self, local_scope): - self._local_scope = local_scope + # Stubs for handling special syntax in the root DEPS file. + class VarImpl: + def __init__(self, local_scope): + self._local_scope = local_scope - def Lookup(self, var_name): - """Implements the Var syntax.""" - try: - return self._local_scope['vars'][var_name] - except KeyError: - raise Exception('Var is not defined: %s' % var_name) + def Lookup(self, var_name): + """Implements the Var syntax.""" + try: + return self._local_scope['vars'][var_name] + except KeyError as var_not_defined: + raise Exception('Var is not defined: %s' % + var_name) from var_not_defined - local_scope = {} - global_scope = { - 'Var': VarImpl(local_scope).Lookup, - } - exec contents in global_scope, local_scope - return local_scope + local_scope = {} + global_scope = { + 'Var': VarImpl(local_scope).Lookup, + } + exec(contents, global_scope, local_scope) + return local_scope def _CalculateAddedDeps(os_path, old_contents, new_contents): - """Helper method for _CheckAddedDepsHaveTargetApprovals. Returns + """Helper method for _CheckAddedDepsHaveTargetApprovals. Returns a set of DEPS entries that we should look up. For a directory (rather than a specific filename) we fake a path to a specific filename by adding /DEPS. This is chosen as a file that will seldom or never be subject to per-file include_rules. """ - # We ignore deps entries on auto-generated directories. - auto_generated_dirs = ['grit', 'jni'] + # We ignore deps entries on auto-generated directories. 
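# --- Illustrative aside (editor's sketch, not part of the patch): the set
# arithmetic performed below, with hypothetical rules and the
# auto-generated-dirs filter omitted for brevity.
old_deps = {'base', 'net'}
new_deps = {'base', 'net', 'call', 'rtc_base/strings.h'}
added = new_deps - old_deps
# Directories are mapped to a fake '<dir>/DEPS' path; .h rules are kept as-is:
lookups = {d if d.endswith('.h') else d + '/DEPS' for d in added}
assert lookups == {'call/DEPS', 'rtc_base/strings.h'}
# --- End of aside.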
+ auto_generated_dirs = ['grit', 'jni'] - old_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(old_contents)) - new_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(new_contents)) + old_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(old_contents)) + new_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(new_contents)) - added_deps = new_deps.difference(old_deps) + added_deps = new_deps.difference(old_deps) - results = set() - for added_dep in added_deps: - if added_dep.split('/')[0] in auto_generated_dirs: - continue - # Assume that a rule that ends in .h is a rule for a specific file. - if added_dep.endswith('.h'): - results.add(added_dep) - else: - results.add(os_path.join(added_dep, 'DEPS')) - return results + results = set() + for added_dep in added_deps: + if added_dep.split('/')[0] in auto_generated_dirs: + continue + # Assume that a rule that ends in .h is a rule for a specific file. + if added_dep.endswith('.h'): + results.add(added_dep) + else: + results.add(os_path.join(added_dep, 'DEPS')) + return results def CheckAddedDepsHaveTargetApprovals(input_api, output_api): - """When a dependency prefixed with + is added to a DEPS file, we + """When a dependency prefixed with + is added to a DEPS file, we want to make sure that the change is reviewed by an OWNER of the target file or directory, to avoid layering violations from being introduced. This check verifies that this happens. """ - virtual_depended_on_files = set() - - file_filter = lambda f: not input_api.re.match( - r"^third_party[\\\/](WebKit|blink)[\\\/].*", f.LocalPath()) - for f in input_api.AffectedFiles(include_deletes=False, - file_filter=file_filter): - filename = input_api.os_path.basename(f.LocalPath()) - if filename == 'DEPS': - virtual_depended_on_files.update( - _CalculateAddedDeps(input_api.os_path, - '\n'.join(f.OldContents()), - '\n'.join(f.NewContents()))) - - if not virtual_depended_on_files: - return [] - - if input_api.is_committing: - if input_api.tbr: - return [ - output_api.PresubmitNotifyResult( - '--tbr was specified, skipping OWNERS check for DEPS ' - 'additions' - ) - ] - if input_api.dry_run: - return [ - output_api.PresubmitNotifyResult( - 'This is a dry run, skipping OWNERS check for DEPS ' - 'additions' - ) - ] - if not input_api.change.issue: - return [ - output_api.PresubmitError( - "DEPS approval by OWNERS check failed: this change has " - "no change number, so we can't check it for approvals.") - ] - output = output_api.PresubmitError - else: - output = output_api.PresubmitNotifyResult - - owner_email, reviewers = ( - input_api.canned_checks.GetCodereviewOwnerAndReviewers( - input_api, - None, - approval_needed=input_api.is_committing)) - - owner_email = owner_email or input_api.change.author_email - - approval_status = input_api.owners_client.GetFilesApprovalStatus( - virtual_depended_on_files, reviewers.union([owner_email]), []) - missing_files = [ - f for f in virtual_depended_on_files - if approval_status[f] != input_api.owners_client.APPROVED] - - # We strip the /DEPS part that was added by - # _FilesToCheckForIncomingDeps to fake a path to a file in a - # directory. 
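# --- Illustrative aside (editor's sketch, not part of the patch): StripDeps,
# defined just below, undoes the fake '/DEPS' suffix that _CalculateAddedDeps
# appends to directory rules.
def StripDeps(path):
  start_deps = path.rfind('/DEPS')
  return path[:start_deps] if start_deps != -1 else path

assert StripDeps('call/DEPS') == 'call'
assert StripDeps('rtc_base/strings.h') == 'rtc_base/strings.h'
# --- End of aside.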
- def StripDeps(path): - start_deps = path.rfind('/DEPS') - if start_deps != -1: - return path[:start_deps] - else: - return path + virtual_depended_on_files = set() + + file_filter = lambda f: not input_api.re.match( + r"^third_party[\\\/](WebKit|blink)[\\\/].*", f.LocalPath()) + for f in input_api.AffectedFiles(include_deletes=False, + file_filter=file_filter): + filename = input_api.os_path.basename(f.LocalPath()) + if filename == 'DEPS': + virtual_depended_on_files.update( + _CalculateAddedDeps(input_api.os_path, '\n'.join(f.OldContents()), + '\n'.join(f.NewContents()))) + + if not virtual_depended_on_files: + return [] - unapproved_dependencies = [ - "'+%s'," % StripDeps(path) for path in missing_files + if input_api.is_committing: + if input_api.tbr: + return [ + output_api.PresubmitNotifyResult( + '--tbr was specified, skipping OWNERS check for DEPS ' + 'additions') + ] + if input_api.dry_run: + return [ + output_api.PresubmitNotifyResult( + 'This is a dry run, skipping OWNERS check for DEPS ' + 'additions') + ] + if not input_api.change.issue: + return [ + output_api.PresubmitError( + "DEPS approval by OWNERS check failed: this change has " + "no change number, so we can't check it for approvals.") + ] + output = output_api.PresubmitError + else: + output = output_api.PresubmitNotifyResult + + owner_email, reviewers = ( + input_api.canned_checks.GetCodereviewOwnerAndReviewers( + input_api, None, approval_needed=input_api.is_committing)) + + owner_email = owner_email or input_api.change.author_email + + approval_status = input_api.owners_client.GetFilesApprovalStatus( + virtual_depended_on_files, reviewers.union([owner_email]), []) + missing_files = [ + f for f in virtual_depended_on_files + if approval_status[f] != input_api.owners_client.APPROVED + ] + + # We strip the /DEPS part that was added by + # _FilesToCheckForIncomingDeps to fake a path to a file in a + # directory. + def StripDeps(path): + start_deps = path.rfind('/DEPS') + if start_deps != -1: + return path[:start_deps] + return path + + unapproved_dependencies = [ + "'+%s'," % StripDeps(path) for path in missing_files + ] + + if unapproved_dependencies: + output_list = [ + output('You need LGTM from owners of depends-on paths in DEPS that ' + ' were modified in this CL:\n %s' % + '\n '.join(sorted(unapproved_dependencies))) ] - - if unapproved_dependencies: - output_list = [ - output( - 'You need LGTM from owners of depends-on paths in DEPS that ' - ' were modified in this CL:\n %s' % - '\n '.join(sorted(unapproved_dependencies))) - ] - suggested_owners = input_api.owners_client.SuggestOwners( - missing_files, exclude=[owner_email]) - output_list.append( - output('Suggested missing target path OWNERS:\n %s' % - '\n '.join(suggested_owners or []))) - return output_list - - return [] + suggested_owners = input_api.owners_client.SuggestOwners( + missing_files, exclude=[owner_email]) + output_list.append( + output('Suggested missing target path OWNERS:\n %s' % + '\n '.join(suggested_owners or []))) + return output_list + + return [] diff --git a/README.md b/README.md index 1ae9ea9cb7..f54f8813c5 100644 --- a/README.md +++ b/README.md @@ -35,5 +35,6 @@ native API header files. 
* [Coding style guide](g3doc/style-guide.md) * [Code of conduct](CODE_OF_CONDUCT.md) * [Reporting bugs](docs/bug-reporting.md) + * [Documentation](g3doc/sitemap.md) -[native-dev]: https://webrtc.googlesource.com/src/+/refs/heads/master/docs/native-code/index.md +[native-dev]: https://webrtc.googlesource.com/src/+/main/docs/native-code/index.md diff --git a/WATCHLISTS b/WATCHLISTS index 750a4c2164..11464b1f69 100644 --- a/WATCHLISTS +++ b/WATCHLISTS @@ -97,6 +97,9 @@ 'pc': { 'filepath': '^pc/.*', }, + 'logging' : { + 'filepath': 'logging/.*', + }, }, 'WATCHLISTS': { @@ -164,14 +167,12 @@ 'zhengzhonghou@agora.io'], 'bitrate_controller': ['mflodman@webrtc.org', 'stefan@webrtc.org', - 'srte@webrtc.org', 'zhuangzesen@agora.io'], - 'congestion_controller': ['srte@webrtc.org'], + 'congestion_controller': [], 'remote_bitrate_estimator': ['mflodman@webrtc.org', 'stefan@webrtc.org', 'zhuangzesen@agora.io'], 'pacing': ['mflodman@webrtc.org', - 'srte@webrtc.org', 'stefan@webrtc.org', 'zhuangzesen@agora.io'], 'rtp_rtcp': ['mflodman@webrtc.org', @@ -184,5 +185,6 @@ 'peah@webrtc.org', 'zhengzhonghou@agora.io'], 'pc': ['steveanton+watch@webrtc.org'], + 'logging': ['terelius@webrtc.org'], }, } diff --git a/api/BUILD.gn b/api/BUILD.gn index c775a1a871..12e0c5cafa 100644 --- a/api/BUILD.gn +++ b/api/BUILD.gn @@ -98,6 +98,12 @@ rtc_library("rtp_packet_info") { absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } +rtc_source_set("video_track_source_constraints") { + visibility = [ "*" ] + sources = [ "video_track_source_constraints.h" ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + rtc_library("media_stream_interface") { visibility = [ "*" ] sources = [ @@ -110,6 +116,7 @@ rtc_library("media_stream_interface") { ":audio_options_api", ":rtp_parameters", ":scoped_refptr", + ":video_track_source_constraints", "../modules/audio_processing:audio_processing_statistics", "../rtc_base:checks", "../rtc_base:refcount", @@ -183,6 +190,7 @@ rtc_library("libjingle_peerconnection_api") { "crypto:frame_decryptor_interface", "crypto:frame_encryptor_interface", "crypto:options", + "metronome", "neteq:neteq_api", "rtc_event_log", "task_queue", @@ -255,6 +263,7 @@ rtc_source_set("packet_socket_factory") { ] deps = [ ":async_dns_resolver", + ":wrapping_async_dns_resolver", "../rtc_base:async_resolver_interface", "../rtc_base:rtc_base", "../rtc_base:socket_address", @@ -263,6 +272,7 @@ rtc_source_set("packet_socket_factory") { } rtc_source_set("async_dns_resolver") { + visibility = [ "*" ] sources = [ "async_dns_resolver.h" ] deps = [ "../rtc_base:socket_address", @@ -270,6 +280,28 @@ rtc_source_set("async_dns_resolver") { ] } +rtc_source_set("wrapping_async_dns_resolver") { + visibility = [ + ":*", + "../p2p:rtc_p2p", + ] + sources = [ + "wrapping_async_dns_resolver.cc", + "wrapping_async_dns_resolver.h", + ] + deps = [ + ":async_dns_resolver", + ":sequence_checker", + "../rtc_base:async_resolver_interface", + "../rtc_base:checks", + "../rtc_base:macromagic", + "../rtc_base:socket_address", + "../rtc_base:threading", + "../rtc_base/third_party/sigslot", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] +} + rtc_source_set("scoped_refptr") { visibility = [ "*" ] sources = [ "scoped_refptr.h" ] @@ -326,6 +358,7 @@ rtc_source_set("rtp_transceiver_direction") { } rtc_source_set("priority") { + visibility = [ "*" ] sources = [ "priority.h" ] } @@ -378,6 +411,16 @@ rtc_source_set("stats_observer_interface") { absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } 
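# Editor's note, not part of this CL: the targets added to this file all follow
# the same shape. A hedged sketch of a header-only api/ target with explicit
# visibility (target and file names hypothetical):
#
#   rtc_source_set("example_interface") {
#     visibility = [ "*" ]  # callers outside webrtc may depend on this
#     sources = [ "example_interface.h" ]
#     deps = [ ":scoped_refptr" ]
#   }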
+rtc_source_set("peer_network_dependencies") { + visibility = [ "*" ] + sources = [ "test/peer_network_dependencies.h" ] + deps = [ + ":packet_socket_factory", + "../rtc_base", + "../rtc_base:threading", + ] +} + rtc_source_set("peer_connection_quality_test_fixture_api") { visibility = [ "*" ] testonly = true @@ -393,6 +436,7 @@ rtc_source_set("peer_connection_quality_test_fixture_api") { ":media_stream_interface", ":network_state_predictor_api", ":packet_socket_factory", + ":peer_network_dependencies", ":rtp_parameters", ":simulated_network_api", ":stats_observer_interface", @@ -621,6 +665,8 @@ rtc_source_set("network_emulation_manager_api") { ] deps = [ ":array_view", + ":packet_socket_factory", + ":peer_network_dependencies", ":simulated_network_api", ":time_controller", "../call:simulated_network", @@ -800,7 +846,6 @@ if (rtc_include_tests) { "test/videocodec_test_stats.h", ] deps = [ - "../media:rtc_h264_profile_id", "../modules/video_coding:video_codec_interface", "../rtc_base:stringutils", "video:video_frame_type", @@ -825,6 +870,7 @@ if (rtc_include_tests) { } rtc_source_set("mock_audio_mixer") { + visibility = [ "*" ] testonly = true sources = [ "test/mock_audio_mixer.h" ] @@ -834,6 +880,18 @@ if (rtc_include_tests) { ] } + rtc_source_set("mock_audio_sink") { + visibility = [ "*" ] + testonly = true + sources = [ "test/mock_audio_sink.h" ] + + deps = [ + "../api:media_stream_interface", + "../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] + } + rtc_source_set("mock_data_channel") { visibility = [ "*" ] testonly = true @@ -846,6 +904,7 @@ if (rtc_include_tests) { } rtc_source_set("mock_fec_controller_override") { + visibility = [ "*" ] testonly = true sources = [ "test/mock_fec_controller_override.h" ] deps = [ @@ -855,6 +914,7 @@ if (rtc_include_tests) { } rtc_library("mock_frame_encryptor") { + visibility = [ "*" ] testonly = true sources = [ "test/mock_frame_encryptor.h" ] deps = [ @@ -866,6 +926,7 @@ if (rtc_include_tests) { } rtc_library("mock_frame_decryptor") { + visibility = [ "*" ] testonly = true sources = [ "test/mock_frame_decryptor.h" ] deps = [ @@ -876,6 +937,7 @@ if (rtc_include_tests) { } rtc_library("fake_frame_encryptor") { + visibility = [ "*" ] testonly = true sources = [ "test/fake_frame_encryptor.cc", @@ -892,6 +954,7 @@ if (rtc_include_tests) { } rtc_library("fake_frame_decryptor") { + visibility = [ "*" ] testonly = true sources = [ "test/fake_frame_decryptor.cc", @@ -938,6 +1001,8 @@ if (rtc_include_tests) { deps = [ ":libjingle_peerconnection_api", + "../api:scoped_refptr", + "../rtc_base:refcount", "../test:test_support", ] } @@ -954,6 +1019,7 @@ if (rtc_include_tests) { } rtc_source_set("mock_async_dns_resolver") { + visibility = [ "*" ] testonly = true sources = [ "test/mock_async_dns_resolver.h" ] deps = [ @@ -978,6 +1044,7 @@ if (rtc_include_tests) { } rtc_source_set("mock_transformable_video_frame") { + visibility = [ "*" ] testonly = true sources = [ "test/mock_transformable_video_frame.h" ] @@ -988,6 +1055,7 @@ if (rtc_include_tests) { } rtc_source_set("mock_video_bitrate_allocator") { + visibility = [ "*" ] testonly = true sources = [ "test/mock_video_bitrate_allocator.h" ] @@ -998,6 +1066,7 @@ if (rtc_include_tests) { } rtc_source_set("mock_video_bitrate_allocator_factory") { + visibility = [ "*" ] testonly = true sources = [ "test/mock_video_bitrate_allocator_factory.h" ] @@ -1008,6 +1077,7 @@ if (rtc_include_tests) { } rtc_source_set("mock_video_codec_factory") { + visibility = [ "*" ] testonly = true 
sources = [ "test/mock_video_decoder_factory.h", @@ -1022,7 +1092,6 @@ if (rtc_include_tests) { rtc_library("mock_video_decoder") { visibility = [ "*" ] - testonly = true sources = [ "test/mock_video_decoder.h" ] @@ -1034,7 +1103,6 @@ if (rtc_include_tests) { rtc_library("mock_video_encoder") { visibility = [ "*" ] - testonly = true sources = [ "test/mock_video_encoder.h" ] @@ -1044,6 +1112,19 @@ if (rtc_include_tests) { ] } + rtc_library("mock_video_track") { + visibility = [ "*" ] + testonly = true + sources = [ "test/mock_video_track.h" ] + + deps = [ + "../api:media_stream_interface", + "../api:scoped_refptr", + "../rtc_base:refcount", + "../test:test_support", + ] + } + rtc_library("create_time_controller") { visibility = [ "*" ] testonly = true @@ -1118,6 +1199,7 @@ if (rtc_include_tests) { ":fake_frame_encryptor", ":mock_async_dns_resolver", ":mock_audio_mixer", + ":mock_audio_sink", ":mock_data_channel", ":mock_frame_decryptor", ":mock_frame_encryptor", @@ -1131,6 +1213,7 @@ if (rtc_include_tests) { ":mock_video_codec_factory", ":mock_video_decoder", ":mock_video_encoder", + ":mock_video_track", ":rtc_api_unittests", "units:units_unittests", ] diff --git a/api/DEPS b/api/DEPS index cdd17e9909..ff493bf77f 100644 --- a/api/DEPS +++ b/api/DEPS @@ -112,14 +112,6 @@ specific_include_rules = { "+rtc_base/ref_count.h", ], - "jsep_ice_candidate\.h": [ - "+rtc_base/constructor_magic.h", - ], - - "jsep_session_description\.h": [ - "+rtc_base/constructor_magic.h", - ], - "media_stream_interface\.h": [ "+modules/audio_processing/include/audio_processing_statistics.h", "+rtc_base/ref_count.h", @@ -156,7 +148,6 @@ specific_include_rules = { ], "ref_counted_base\.h": [ - "+rtc_base/constructor_magic.h", "+rtc_base/ref_count.h", "+rtc_base/ref_counter.h", ], @@ -168,6 +159,9 @@ specific_include_rules = { # For private member and constructor. "+rtc_base/system/file_wrapper.h", ], + "rtp_packet_infos\.h": [ + "+rtc_base/ref_counted_object.h", + ], "rtp_receiver_interface\.h": [ "+rtc_base/ref_count.h", ], @@ -192,7 +186,6 @@ specific_include_rules = { ], "stats_types\.h": [ - "+rtc_base/constructor_magic.h", "+rtc_base/ref_count.h", "+rtc_base/thread_checker.h", ], @@ -201,17 +194,12 @@ specific_include_rules = { "+rtc_base/ref_count.h", ], - "audio_frame\.h": [ - "+rtc_base/constructor_magic.h", - ], - "audio_mixer\.h": [ "+rtc_base/ref_count.h", ], "audio_decoder\.h": [ "+rtc_base/buffer.h", - "+rtc_base/constructor_magic.h", ], "audio_decoder_factory\.h": [ @@ -267,10 +255,25 @@ specific_include_rules = { "+rtc_base/ref_counted_object.h", ], + "fake_metronome\.h": [ + "+rtc_base/synchronization/mutex.h", + "+rtc_base/task_queue.h", + "+rtc_base/task_utils/repeating_task.h", + "+rtc_base/thread_annotations.h", + ], + "mock.*\.h": [ "+test/gmock.h", ], + "mock_peerconnectioninterface\.h": [ + "+rtc_base/ref_counted_object.h", + ], + + "mock_video_track\.h": [ + "+rtc_base/ref_counted_object.h", + ], + "simulated_network\.h": [ "+rtc_base/random.h", "+rtc_base/thread_annotations.h", @@ -297,6 +300,14 @@ specific_include_rules = { "+rtc_base/thread_annotations.h", ], + "wrapping_async_dns_resolver\.h": [ + "+rtc_base/async_resolver.h", + "+rtc_base/async_resolver_interface.h", + "+rtc_base/socket_address.h", + "+rtc_base/third_party/sigslot/sigslot.h", + "+rtc_base/thread_annotations.h", + ], + # .cc files in api/ should not be restricted in what they can #include, # so we re-add all the top-level directories here. 
(That's because .h # files leak their #includes to whoever's #including them, but .cc files diff --git a/api/OWNERS b/api/OWNERS index 6ffb2588aa..516ae17f5e 100644 --- a/api/OWNERS +++ b/api/OWNERS @@ -1,7 +1,6 @@ crodbro@webrtc.org deadbeef@webrtc.org hta@webrtc.org -juberti@webrtc.org magjed@webrtc.org perkj@webrtc.org tkchin@webrtc.org diff --git a/api/adaptation/resource.h b/api/adaptation/resource.h index 9b3968055f..7d7c70b3eb 100644 --- a/api/adaptation/resource.h +++ b/api/adaptation/resource.h @@ -57,7 +57,7 @@ class RTC_EXPORT Resource : public rtc::RefCountInterface { ~Resource() override; virtual std::string Name() const = 0; - // The |listener| may be informed of resource usage measurements on any task + // The `listener` may be informed of resource usage measurements on any task // queue, but not after this method is invoked with the null argument. virtual void SetResourceListener(ResourceListener* listener) = 0; }; diff --git a/api/array_view.h b/api/array_view.h index df365cb740..2d68f1650f 100644 --- a/api/array_view.h +++ b/api/array_view.h @@ -84,7 +84,7 @@ namespace rtc { // a pointer if fix-sized) and trivially copyable, so it's probably cheaper to // pass it by value than by const reference. -namespace impl { +namespace array_view_internal { // Magic constant for indicating that the size of an ArrayView is variable // instead of fixed. @@ -125,7 +125,7 @@ class ArrayViewBase { // Specialized base class for ArrayViews of variable size. template -class ArrayViewBase { +class ArrayViewBase { public: ArrayViewBase(T* data, size_t size) : data_(size == 0 ? nullptr : data), size_(size) {} @@ -142,10 +142,11 @@ class ArrayViewBase { size_t size_; }; -} // namespace impl +} // namespace array_view_internal -template -class ArrayView final : public impl::ArrayViewBase { +template +class ArrayView final : public array_view_internal::ArrayViewBase { public: using value_type = T; using const_iterator = const T*; @@ -153,7 +154,7 @@ class ArrayView final : public impl::ArrayViewBase { // Construct an ArrayView from a pointer and a length. template ArrayView(U* data, size_t size) - : impl::ArrayViewBase::ArrayViewBase(data, size) { + : array_view_internal::ArrayViewBase::ArrayViewBase(data, size) { RTC_DCHECK_EQ(size == 0 ? nullptr : data, this->data()); RTC_DCHECK_EQ(size, this->size()); RTC_DCHECK_EQ(!this->data(), @@ -167,7 +168,8 @@ class ArrayView final : public impl::ArrayViewBase { : ArrayView() {} ArrayView(std::nullptr_t, size_t size) : ArrayView(static_cast(nullptr), size) { - static_assert(Size == 0 || Size == impl::kArrayViewVarSize, ""); + static_assert(Size == 0 || Size == array_view_internal::kArrayViewVarSize, + ""); RTC_DCHECK_EQ(0, size); } @@ -175,7 +177,7 @@ class ArrayView final : public impl::ArrayViewBase { template ArrayView(U (&array)[N]) // NOLINT : ArrayView(array, N) { - static_assert(Size == N || Size == impl::kArrayViewVarSize, + static_assert(Size == N || Size == array_view_internal::kArrayViewVarSize, "Array size must match ArrayView size"); } @@ -208,7 +210,7 @@ class ArrayView final : public impl::ArrayViewBase { // N> when M != N. 
template < typename U, - typename std::enable_if::value>::type* = nullptr> ArrayView(U& u) // NOLINT : ArrayView(u.data(), u.size()) { @@ -216,7 +218,7 @@ class ArrayView final : public impl::ArrayViewBase { } template < typename U, - typename std::enable_if::value>::type* = nullptr> ArrayView(const U& u) // NOLINT(runtime/explicit) : ArrayView(u.data(), u.size()) { @@ -236,13 +238,13 @@ class ArrayView final : public impl::ArrayViewBase { // const rtc::Buffer to ArrayView. template < typename U, - typename std::enable_if::value>::type* = nullptr> ArrayView(U& u) // NOLINT : ArrayView(u.data(), u.size()) {} template < typename U, - typename std::enable_if::value>::type* = nullptr> ArrayView(const U& u) // NOLINT(runtime/explicit) : ArrayView(u.data(), u.size()) {} diff --git a/api/async_dns_resolver.h b/api/async_dns_resolver.h index eabb41c11f..138503b59f 100644 --- a/api/async_dns_resolver.h +++ b/api/async_dns_resolver.h @@ -41,21 +41,26 @@ namespace webrtc { class AsyncDnsResolverResult { public: virtual ~AsyncDnsResolverResult() = default; - // Returns true iff the address from |Start| was successfully resolved. - // If the address was successfully resolved, sets |addr| to a copy of the - // address from |Start| with the IP address set to the top most resolved - // address of |family| (|addr| will have both hostname and the resolved ip). + // Returns true iff the address from `Start` was successfully resolved. + // If the address was successfully resolved, sets `addr` to a copy of the + // address from `Start` with the IP address set to the top most resolved + // address of `family` (`addr` will have both hostname and the resolved ip). virtual bool GetResolvedAddress(int family, rtc::SocketAddress* addr) const = 0; // Returns error from resolver. virtual int GetError() const = 0; }; +// The API for a single name query. +// The constructor, destructor and all functions must be called from +// the same sequence, and the callback will also be called on that sequence. +// The class guarantees that the callback will not be called if the +// resolver's destructor has been called. class RTC_EXPORT AsyncDnsResolverInterface { public: virtual ~AsyncDnsResolverInterface() = default; - // Start address resolution of the hostname in |addr|. + // Start address resolution of the hostname in `addr`. virtual void Start(const rtc::SocketAddress& addr, std::function callback) = 0; virtual const AsyncDnsResolverResult& result() const = 0; @@ -70,7 +75,7 @@ class AsyncDnsResolverFactoryInterface { // Creates an AsyncDnsResolver and starts resolving the name. The callback // will be called when resolution is finished. - // The callback will be called on the thread that the caller runs on. + // The callback will be called on the sequence that the caller runs on. 
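  // Editor's illustrative sketch, not part of this CL. Typical use, assuming
  // a concrete `factory` instance (all names below are hypothetical):
  //
  //   std::unique_ptr<AsyncDnsResolverInterface> resolver;
  //   resolver = factory->CreateAndResolve(
  //       rtc::SocketAddress("example.com", 3478), [&resolver]() {
  //         rtc::SocketAddress resolved;
  //         if (resolver->result().GetResolvedAddress(AF_INET, &resolved)) {
  //           // `resolved` now holds the hostname plus the top IPv4 result;
  //           // this runs on the sequence that created the resolver.
  //         }
  //       });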
virtual std::unique_ptr CreateAndResolve( const rtc::SocketAddress& addr, std::function callback) = 0; diff --git a/api/audio/BUILD.gn b/api/audio/BUILD.gn index d0465bbc40..49cf95dbce 100644 --- a/api/audio/BUILD.gn +++ b/api/audio/BUILD.gn @@ -95,6 +95,7 @@ rtc_source_set("echo_control") { rtc_source_set("echo_detector_creator") { visibility = [ "*" ] + allow_poison = [ "default_echo_detector" ] sources = [ "echo_detector_creator.cc", "echo_detector_creator.h", @@ -102,7 +103,7 @@ rtc_source_set("echo_detector_creator") { deps = [ "../../api:scoped_refptr", "../../modules/audio_processing:api", - "../../modules/audio_processing:audio_processing", + "../../modules/audio_processing:residual_echo_detector", "../../rtc_base:refcount", ] } diff --git a/api/audio/audio_frame.cc b/api/audio/audio_frame.cc index c6e5cf4dd6..3e12006386 100644 --- a/api/audio/audio_frame.cc +++ b/api/audio/audio_frame.cc @@ -11,8 +11,6 @@ #include "api/audio/audio_frame.h" #include -#include -#include #include "rtc_base/checks.h" #include "rtc_base/time_utils.h" @@ -24,35 +22,13 @@ AudioFrame::AudioFrame() { static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes"); } -void swap(AudioFrame& a, AudioFrame& b) { - using std::swap; - swap(a.timestamp_, b.timestamp_); - swap(a.elapsed_time_ms_, b.elapsed_time_ms_); - swap(a.ntp_time_ms_, b.ntp_time_ms_); - swap(a.samples_per_channel_, b.samples_per_channel_); - swap(a.sample_rate_hz_, b.sample_rate_hz_); - swap(a.num_channels_, b.num_channels_); - swap(a.channel_layout_, b.channel_layout_); - swap(a.speech_type_, b.speech_type_); - swap(a.vad_activity_, b.vad_activity_); - swap(a.profile_timestamp_ms_, b.profile_timestamp_ms_); - swap(a.packet_infos_, b.packet_infos_); - const size_t length_a = a.samples_per_channel_ * a.num_channels_; - const size_t length_b = b.samples_per_channel_ * b.num_channels_; - RTC_DCHECK_LE(length_a, AudioFrame::kMaxDataSizeSamples); - RTC_DCHECK_LE(length_b, AudioFrame::kMaxDataSizeSamples); - std::swap_ranges(a.data_, a.data_ + std::max(length_a, length_b), b.data_); - swap(a.muted_, b.muted_); - swap(a.absolute_capture_timestamp_ms_, b.absolute_capture_timestamp_ms_); -} - void AudioFrame::Reset() { ResetWithoutMuting(); muted_ = true; } void AudioFrame::ResetWithoutMuting() { - // TODO(wu): Zero is a valid value for |timestamp_|. We should initialize + // TODO(wu): Zero is a valid value for `timestamp_`. We should initialize // to an invalid value, or add a new member to indicate invalidity. timestamp_ = 0; elapsed_time_ms_ = -1; diff --git a/api/audio/audio_frame.h b/api/audio/audio_frame.h index 78539f57eb..d5dcb5f788 100644 --- a/api/audio/audio_frame.h +++ b/api/audio/audio_frame.h @@ -14,11 +14,8 @@ #include #include -#include - #include "api/audio/channel_layout.h" #include "api/rtp_packet_infos.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -60,7 +57,8 @@ class AudioFrame { AudioFrame(); - friend void swap(AudioFrame& a, AudioFrame& b); + AudioFrame(const AudioFrame&) = delete; + AudioFrame& operator=(const AudioFrame&) = delete; // Resets all members to their default state. void Reset(); @@ -139,7 +137,7 @@ class AudioFrame { int64_t profile_timestamp_ms_ = 0; // Information about packets used to assemble this audio frame. This is needed - // by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's + // by `SourceTracker` when the frame is delivered to the RTCRtpReceiver's // MediaStreamTrack, in order to implement getContributingSources(). 
See: // https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources // @@ -149,7 +147,7 @@ class AudioFrame { // sync buffer is the small sample-holding buffer located after the audio // decoder and before where samples are assembled into output frames. // - // |RtpPacketInfos| may also be empty if the audio samples did not come from + // `RtpPacketInfos` may also be empty if the audio samples did not come from // RTP packets. E.g. if the audio were locally generated by packet loss // concealment, comfort noise generation, etc. RtpPacketInfos packet_infos_; @@ -165,11 +163,9 @@ class AudioFrame { // Absolute capture timestamp when this audio frame was originally captured. // This is only valid for audio frames captured on this machine. The absolute - // capture timestamp of a received frame is found in |packet_infos_|. + // capture timestamp of a received frame is found in `packet_infos_`. // This timestamp MUST be based on the same clock as rtc::TimeMillis(). absl::optional absolute_capture_timestamp_ms_; - - RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame); }; } // namespace webrtc diff --git a/api/audio/audio_frame_processor.h b/api/audio/audio_frame_processor.h index bc21d14858..cb65c4817e 100644 --- a/api/audio/audio_frame_processor.h +++ b/api/audio/audio_frame_processor.h @@ -28,12 +28,12 @@ class AudioFrameProcessor { // Processes the frame received from WebRTC, is called by WebRTC off the // realtime audio capturing path. AudioFrameProcessor must reply with - // processed frames by calling |sink_callback| if it was provided in SetSink() - // call. |sink_callback| can be called in the context of Process(). + // processed frames by calling `sink_callback` if it was provided in SetSink() + // call. `sink_callback` can be called in the context of Process(). virtual void Process(std::unique_ptr frame) = 0; // Atomically replaces the current sink with the new one. Before the - // first call to this function, or if the provided |sink_callback| is nullptr, + // first call to this function, or if the provided `sink_callback` is nullptr, // processed frames are simply discarded. virtual void SetSink(OnAudioFrameCallback sink_callback) = 0; }; diff --git a/api/audio/audio_mixer.h b/api/audio/audio_mixer.h index b290cfacf0..3483df22bc 100644 --- a/api/audio/audio_mixer.h +++ b/api/audio/audio_mixer.h @@ -35,9 +35,9 @@ class AudioMixer : public rtc::RefCountInterface { kError, // The audio_frame will not be used. }; - // Overwrites |audio_frame|. The data_ field is overwritten with + // Overwrites `audio_frame`. The data_ field is overwritten with // 10 ms of new audio (either 1 or 2 interleaved channels) at - // |sample_rate_hz|. All fields in |audio_frame| must be updated. + // `sample_rate_hz`. All fields in `audio_frame` must be updated. virtual AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz, AudioFrame* audio_frame) = 0; @@ -66,7 +66,7 @@ class AudioMixer : public rtc::RefCountInterface { // should mix at a rate that doesn't cause quality loss of the // sources' audio. The mixing rate is one of the rates listed in // AudioProcessing::NativeRate. All fields in - // |audio_frame_for_mixing| must be updated. + // `audio_frame_for_mixing` must be updated. 
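  // Editor's illustrative sketch, not part of this CL (mixer construction is
  // hypothetical; source1 and source2 implement AudioMixer::Source):
  //
  //   rtc::scoped_refptr<AudioMixer> mixer = CreateSomeMixer();
  //   mixer->AddSource(&source1);
  //   mixer->AddSource(&source2);
  //   AudioFrame mixed;
  //   mixer->Mix(/*number_of_channels=*/2, &mixed);
  //   // `mixed` now holds 10 ms of interleaved stereo at the mixing rate.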
virtual void Mix(size_t number_of_channels, AudioFrame* audio_frame_for_mixing) = 0; diff --git a/api/audio/channel_layout.cc b/api/audio/channel_layout.cc index 567f4d9b26..e4ae356fab 100644 --- a/api/audio/channel_layout.cc +++ b/api/audio/channel_layout.cc @@ -275,7 +275,7 @@ const char* ChannelLayoutToString(ChannelLayout layout) { case CHANNEL_LAYOUT_BITSTREAM: return "BITSTREAM"; } - RTC_NOTREACHED() << "Invalid channel layout provided: " << layout; + RTC_DCHECK_NOTREACHED() << "Invalid channel layout provided: " << layout; return ""; } diff --git a/api/audio/echo_canceller3_config.cc b/api/audio/echo_canceller3_config.cc index b38d6b5b7e..0224c712b4 100644 --- a/api/audio/echo_canceller3_config.cc +++ b/api/audio/echo_canceller3_config.cc @@ -166,6 +166,7 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) { res = res & Limit(&c->ep_strength.default_gain, 0.f, 1000000.f); res = res & Limit(&c->ep_strength.default_len, -1.f, 1.f); + res = res & Limit(&c->ep_strength.nearend_len, -1.0f, 1.0f); res = res & Limit(&c->echo_audibility.low_render_limit, 0.f, 32768.f * 32768.f); diff --git a/api/audio/echo_canceller3_config.h b/api/audio/echo_canceller3_config.h index 087e8da439..1fd403652a 100644 --- a/api/audio/echo_canceller3_config.h +++ b/api/audio/echo_canceller3_config.h @@ -108,9 +108,11 @@ struct RTC_EXPORT EchoCanceller3Config { struct EpStrength { float default_gain = 1.f; float default_len = 0.83f; + float nearend_len = 0.83f; bool echo_can_saturate = true; bool bounded_erl = false; bool erle_onset_compensation_in_dominant_nearend = false; + bool use_conservative_tail_frequency_response = true; } ep_strength; struct EchoAudibility { @@ -207,6 +209,7 @@ struct RTC_EXPORT EchoCanceller3Config { int hold_duration = 50; int trigger_threshold = 12; bool use_during_initial_phase = true; + bool use_unbounded_echo_spectrum = true; } dominant_nearend_detection; struct SubbandNearendDetection { diff --git a/api/audio/echo_canceller3_config_json.cc b/api/audio/echo_canceller3_config_json.cc index 263599c538..71966c13b3 100644 --- a/api/audio/echo_canceller3_config_json.cc +++ b/api/audio/echo_canceller3_config_json.cc @@ -259,10 +259,13 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, if (rtc::GetValueFromJsonObject(aec3_root, "ep_strength", §ion)) { ReadParam(section, "default_gain", &cfg.ep_strength.default_gain); ReadParam(section, "default_len", &cfg.ep_strength.default_len); + ReadParam(section, "nearend_len", &cfg.ep_strength.nearend_len); ReadParam(section, "echo_can_saturate", &cfg.ep_strength.echo_can_saturate); ReadParam(section, "bounded_erl", &cfg.ep_strength.bounded_erl); ReadParam(section, "erle_onset_compensation_in_dominant_nearend", &cfg.ep_strength.erle_onset_compensation_in_dominant_nearend); + ReadParam(section, "use_conservative_tail_frequency_response", + &cfg.ep_strength.use_conservative_tail_frequency_response); } if (rtc::GetValueFromJsonObject(aec3_root, "echo_audibility", §ion)) { @@ -371,6 +374,9 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, ReadParam( subsection, "use_during_initial_phase", &cfg.suppressor.dominant_nearend_detection.use_during_initial_phase); + ReadParam(subsection, "use_unbounded_echo_spectrum", + &cfg.suppressor.dominant_nearend_detection + .use_unbounded_echo_spectrum); } if (rtc::GetValueFromJsonObject(section, "subband_nearend_detection", @@ -560,12 +566,17 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { ost << "\"ep_strength\": {"; ost << "\"default_gain\": " << 
config.ep_strength.default_gain << ","; ost << "\"default_len\": " << config.ep_strength.default_len << ","; + ost << "\"nearend_len\": " << config.ep_strength.nearend_len << ","; ost << "\"echo_can_saturate\": " << (config.ep_strength.echo_can_saturate ? "true" : "false") << ","; ost << "\"bounded_erl\": " << (config.ep_strength.bounded_erl ? "true" : "false") << ","; ost << "\"erle_onset_compensation_in_dominant_nearend\": " << (config.ep_strength.erle_onset_compensation_in_dominant_nearend + ? "true" + : "false") << ","; + ost << "\"use_conservative_tail_frequency_response\": " + << (config.ep_strength.use_conservative_tail_frequency_response ? "true" : "false"); ost << "},"; @@ -676,20 +687,20 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { << config.suppressor.last_lf_smoothing_band << ","; ost << "\"last_lf_band\": " << config.suppressor.last_lf_band << ","; ost << "\"first_hf_band\": " << config.suppressor.first_hf_band << ","; - ost << "\"dominant_nearend_detection\": {"; - ost << "\"enr_threshold\": " - << config.suppressor.dominant_nearend_detection.enr_threshold << ","; - ost << "\"enr_exit_threshold\": " - << config.suppressor.dominant_nearend_detection.enr_exit_threshold << ","; - ost << "\"snr_threshold\": " - << config.suppressor.dominant_nearend_detection.snr_threshold << ","; - ost << "\"hold_duration\": " - << config.suppressor.dominant_nearend_detection.hold_duration << ","; - ost << "\"trigger_threshold\": " - << config.suppressor.dominant_nearend_detection.trigger_threshold << ","; - ost << "\"use_during_initial_phase\": " - << config.suppressor.dominant_nearend_detection.use_during_initial_phase; - ost << "},"; + { + const auto& dnd = config.suppressor.dominant_nearend_detection; + ost << "\"dominant_nearend_detection\": {"; + ost << "\"enr_threshold\": " << dnd.enr_threshold << ","; + ost << "\"enr_exit_threshold\": " << dnd.enr_exit_threshold << ","; + ost << "\"snr_threshold\": " << dnd.snr_threshold << ","; + ost << "\"hold_duration\": " << dnd.hold_duration << ","; + ost << "\"trigger_threshold\": " << dnd.trigger_threshold << ","; + ost << "\"use_during_initial_phase\": " << dnd.use_during_initial_phase + << ","; + ost << "\"use_unbounded_echo_spectrum\": " + << dnd.use_unbounded_echo_spectrum; + ost << "},"; + } ost << "\"subband_nearend_detection\": {"; ost << "\"nearend_average_blocks\": " << config.suppressor.subband_nearend_detection.nearend_average_blocks diff --git a/api/audio/test/audio_frame_unittest.cc b/api/audio/test/audio_frame_unittest.cc index f8d3318274..dbf45ceabc 100644 --- a/api/audio/test/audio_frame_unittest.cc +++ b/api/audio/test/audio_frame_unittest.cc @@ -133,54 +133,4 @@ TEST(AudioFrameTest, CopyFrom) { EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples))); } -TEST(AudioFrameTest, SwapFrames) { - AudioFrame frame1, frame2; - int16_t samples1[kNumChannelsMono * kSamplesPerChannel]; - for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) { - samples1[i] = i; - } - frame1.UpdateFrame(kTimestamp, samples1, kSamplesPerChannel, kSampleRateHz, - AudioFrame::kPLC, AudioFrame::kVadActive, - kNumChannelsMono); - frame1.set_absolute_capture_timestamp_ms(12345678); - const auto frame1_channel_layout = frame1.channel_layout(); - - int16_t samples2[(kNumChannelsMono + 1) * (kSamplesPerChannel + 1)]; - for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1); - ++i) { - samples2[i] = 1000 + i; - } - frame2.UpdateFrame(kTimestamp + 1, samples2, kSamplesPerChannel + 1, - 
kSampleRateHz + 1, AudioFrame::kNormalSpeech, - AudioFrame::kVadPassive, kNumChannelsMono + 1); - const auto frame2_channel_layout = frame2.channel_layout(); - - swap(frame1, frame2); - - EXPECT_EQ(kTimestamp + 1, frame1.timestamp_); - ASSERT_EQ(kSamplesPerChannel + 1, frame1.samples_per_channel_); - EXPECT_EQ(kSampleRateHz + 1, frame1.sample_rate_hz_); - EXPECT_EQ(AudioFrame::kNormalSpeech, frame1.speech_type_); - EXPECT_EQ(AudioFrame::kVadPassive, frame1.vad_activity_); - ASSERT_EQ(kNumChannelsMono + 1, frame1.num_channels_); - for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1); - ++i) { - EXPECT_EQ(samples2[i], frame1.data()[i]); - } - EXPECT_FALSE(frame1.absolute_capture_timestamp_ms()); - EXPECT_EQ(frame2_channel_layout, frame1.channel_layout()); - - EXPECT_EQ(kTimestamp, frame2.timestamp_); - ASSERT_EQ(kSamplesPerChannel, frame2.samples_per_channel_); - EXPECT_EQ(kSampleRateHz, frame2.sample_rate_hz_); - EXPECT_EQ(AudioFrame::kPLC, frame2.speech_type_); - EXPECT_EQ(AudioFrame::kVadActive, frame2.vad_activity_); - ASSERT_EQ(kNumChannelsMono, frame2.num_channels_); - for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) { - EXPECT_EQ(samples1[i], frame2.data()[i]); - } - EXPECT_EQ(12345678, frame2.absolute_capture_timestamp_ms()); - EXPECT_EQ(frame1_channel_layout, frame2.channel_layout()); -} - } // namespace webrtc diff --git a/api/audio/test/echo_canceller3_config_json_unittest.cc b/api/audio/test/echo_canceller3_config_json_unittest.cc index d6edd07d2e..bb28b4feb3 100644 --- a/api/audio/test/echo_canceller3_config_json_unittest.cc +++ b/api/audio/test/echo_canceller3_config_json_unittest.cc @@ -37,6 +37,8 @@ TEST(EchoCanceller3JsonHelpers, ToStringAndParseJson) { // Expect unchanged values to remain default. EXPECT_EQ(cfg.ep_strength.default_len, cfg_transformed.ep_strength.default_len); + EXPECT_EQ(cfg.ep_strength.nearend_len, + cfg_transformed.ep_strength.nearend_len); EXPECT_EQ(cfg.suppressor.normal_tuning.mask_lf.enr_suppress, cfg_transformed.suppressor.normal_tuning.mask_lf.enr_suppress); diff --git a/api/audio_codecs/L16/audio_decoder_L16.cc b/api/audio_codecs/L16/audio_decoder_L16.cc index 57c9e76889..93863f1020 100644 --- a/api/audio_codecs/L16/audio_decoder_L16.cc +++ b/api/audio_codecs/L16/audio_decoder_L16.cc @@ -24,9 +24,10 @@ absl::optional AudioDecoderL16::SdpToConfig( Config config; config.sample_rate_hz = format.clockrate_hz; config.num_channels = rtc::checked_cast(format.num_channels); - return absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk() - ? absl::optional(config) - : absl::nullopt; + if (absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()) { + return config; + } + return absl::nullopt; } void AudioDecoderL16::AppendSupportedDecoders( @@ -37,9 +38,11 @@ void AudioDecoderL16::AppendSupportedDecoders( std::unique_ptr AudioDecoderL16::MakeAudioDecoder( const Config& config, absl::optional /*codec_pair_id*/) { - return config.IsOk() ? 
std::make_unique( - config.sample_rate_hz, config.num_channels) - : nullptr; + if (!config.IsOk()) { + return nullptr; + } + return std::make_unique(config.sample_rate_hz, + config.num_channels); } } // namespace webrtc diff --git a/api/audio_codecs/L16/audio_decoder_L16.h b/api/audio_codecs/L16/audio_decoder_L16.h index f0be03659c..581a5b82c1 100644 --- a/api/audio_codecs/L16/audio_decoder_L16.h +++ b/api/audio_codecs/L16/audio_decoder_L16.h @@ -29,7 +29,8 @@ struct RTC_EXPORT AudioDecoderL16 { bool IsOk() const { return (sample_rate_hz == 8000 || sample_rate_hz == 16000 || sample_rate_hz == 32000 || sample_rate_hz == 48000) && - num_channels >= 1; + (num_channels >= 1 && + num_channels <= AudioDecoder::kMaxNumberOfChannels); } int sample_rate_hz = 8000; int num_channels = 1; diff --git a/api/audio_codecs/L16/audio_encoder_L16.cc b/api/audio_codecs/L16/audio_encoder_L16.cc index 507c8d7d26..590d3e32d9 100644 --- a/api/audio_codecs/L16/audio_encoder_L16.cc +++ b/api/audio_codecs/L16/audio_encoder_L16.cc @@ -24,6 +24,7 @@ namespace webrtc { absl::optional AudioEncoderL16::SdpToConfig( const SdpAudioFormat& format) { if (!rtc::IsValueInRangeForNumericType(format.num_channels)) { + RTC_DCHECK_NOTREACHED(); return absl::nullopt; } Config config; @@ -36,9 +37,10 @@ absl::optional AudioEncoderL16::SdpToConfig( config.frame_size_ms = rtc::SafeClamp(10 * (*ptime / 10), 10, 60); } } - return absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk() - ? absl::optional(config) - : absl::nullopt; + if (absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()) { + return config; + } + return absl::nullopt; } void AudioEncoderL16::AppendSupportedEncoders( @@ -58,12 +60,15 @@ std::unique_ptr AudioEncoderL16::MakeAudioEncoder( const AudioEncoderL16::Config& config, int payload_type, absl::optional /*codec_pair_id*/) { - RTC_DCHECK(config.IsOk()); AudioEncoderPcm16B::Config c; c.sample_rate_hz = config.sample_rate_hz; c.num_channels = config.num_channels; c.frame_size_ms = config.frame_size_ms; c.payload_type = payload_type; + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } return std::make_unique(c); } diff --git a/api/audio_codecs/L16/audio_encoder_L16.h b/api/audio_codecs/L16/audio_encoder_L16.h index b410286802..25d221148e 100644 --- a/api/audio_codecs/L16/audio_encoder_L16.h +++ b/api/audio_codecs/L16/audio_encoder_L16.h @@ -29,7 +29,9 @@ struct RTC_EXPORT AudioEncoderL16 { bool IsOk() const { return (sample_rate_hz == 8000 || sample_rate_hz == 16000 || sample_rate_hz == 32000 || sample_rate_hz == 48000) && - num_channels >= 1 && frame_size_ms > 0 && frame_size_ms <= 120 && + num_channels >= 1 && + num_channels <= AudioEncoder::kMaxNumberOfChannels && + frame_size_ms > 0 && frame_size_ms <= 120 && frame_size_ms % 10 == 0; } int sample_rate_hz = 8000; diff --git a/api/audio_codecs/audio_decoder.cc b/api/audio_codecs/audio_decoder.cc index 4b18b4ab52..28f5b8aae8 100644 --- a/api/audio_codecs/audio_decoder.cc +++ b/api/audio_codecs/audio_decoder.cc @@ -10,7 +10,6 @@ #include "api/audio_codecs/audio_decoder.h" -#include #include #include @@ -162,9 +161,10 @@ AudioDecoder::SpeechType AudioDecoder::ConvertSpeechType(int16_t type) { case 2: return kComfortNoise; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return kSpeech; } } +constexpr int AudioDecoder::kMaxNumberOfChannels; } // namespace webrtc diff --git a/api/audio_codecs/audio_decoder.h b/api/audio_codecs/audio_decoder.h index ce235946da..41138741bb 100644 --- a/api/audio_codecs/audio_decoder.h +++ 
b/api/audio_codecs/audio_decoder.h @@ -20,7 +20,6 @@ #include "absl/types/optional.h" #include "api/array_view.h" #include "rtc_base/buffer.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -37,6 +36,9 @@ class AudioDecoder { AudioDecoder() = default; virtual ~AudioDecoder() = default; + AudioDecoder(const AudioDecoder&) = delete; + AudioDecoder& operator=(const AudioDecoder&) = delete; + class EncodedAudioFrame { public: struct DecodeResult { @@ -53,8 +55,8 @@ class AudioDecoder { // Returns true if this packet contains DTX. virtual bool IsDtxPacket() const; - // Decodes this frame of audio and writes the result in |decoded|. - // |decoded| must be large enough to store as many samples as indicated by a + // Decodes this frame of audio and writes the result in `decoded`. + // `decoded` must be large enough to store as many samples as indicated by a // call to Duration() . On success, returns an absl::optional containing the // total number of samples across all channels, as well as whether the // decoder produced comfort noise or speech. On failure, returns an empty @@ -85,8 +87,8 @@ class AudioDecoder { // Let the decoder parse this payload and prepare zero or more decodable // frames. Each frame must be between 10 ms and 120 ms long. The caller must // ensure that the AudioDecoder object outlives any frame objects returned by - // this call. The decoder is free to swap or move the data from the |payload| - // buffer. |timestamp| is the input timestamp, in samples, corresponding to + // this call. The decoder is free to swap or move the data from the `payload` + // buffer. `timestamp` is the input timestamp, in samples, corresponding to // the start of the payload. virtual std::vector ParsePayload(rtc::Buffer&& payload, uint32_t timestamp); @@ -95,12 +97,12 @@ class AudioDecoder { // obsolete; callers should call ParsePayload instead. For now, subclasses // must still implement DecodeInternal. - // Decodes |encode_len| bytes from |encoded| and writes the result in - // |decoded|. The maximum bytes allowed to be written into |decoded| is - // |max_decoded_bytes|. Returns the total number of samples across all - // channels. If the decoder produced comfort noise, |speech_type| + // Decodes `encode_len` bytes from `encoded` and writes the result in + // `decoded`. The maximum bytes allowed to be written into `decoded` is + // `max_decoded_bytes`. Returns the total number of samples across all + // channels. If the decoder produced comfort noise, `speech_type` // is set to kComfortNoise, otherwise it is kSpeech. The desired output - // sample rate is provided in |sample_rate_hz|, which must be valid for the + // sample rate is provided in `sample_rate_hz`, which must be valid for the // codec at hand. int Decode(const uint8_t* encoded, size_t encoded_len, @@ -123,11 +125,11 @@ class AudioDecoder { // Calls the packet-loss concealment of the decoder to update the state after // one or several lost packets. The caller has to make sure that the - // memory allocated in |decoded| should accommodate |num_frames| frames. + // memory allocated in `decoded` should accommodate `num_frames` frames. virtual size_t DecodePlc(size_t num_frames, int16_t* decoded); // Asks the decoder to generate packet-loss concealment and append it to the - // end of |concealment_audio|. The concealment audio should be in + // end of `concealment_audio`. The concealment audio should be in // channel-interleaved format, with as many channels as the last decoded // packet produced. 
The implementation must produce at least // requested_samples_per_channel, or nothing at all. This is a signal to the @@ -146,19 +148,19 @@ class AudioDecoder { // Returns the last error code from the decoder. virtual int ErrorCode(); - // Returns the duration in samples-per-channel of the payload in |encoded| - // which is |encoded_len| bytes long. Returns kNotImplemented if no duration + // Returns the duration in samples-per-channel of the payload in `encoded` + // which is `encoded_len` bytes long. Returns kNotImplemented if no duration // estimate is available, or -1 in case of an error. virtual int PacketDuration(const uint8_t* encoded, size_t encoded_len) const; // Returns the duration in samples-per-channel of the redandant payload in - // |encoded| which is |encoded_len| bytes long. Returns kNotImplemented if no + // `encoded` which is `encoded_len` bytes long. Returns kNotImplemented if no // duration estimate is available, or -1 in case of an error. virtual int PacketDurationRedundant(const uint8_t* encoded, size_t encoded_len) const; // Detects whether a packet has forward error correction. The packet is - // comprised of the samples in |encoded| which is |encoded_len| bytes long. + // comprised of the samples in `encoded` which is `encoded_len` bytes long. // Returns true if the packet has FEC and false otherwise. virtual bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const; @@ -170,6 +172,9 @@ class AudioDecoder { // during the lifetime of the decoder. virtual size_t Channels() const = 0; + // The maximum number of audio channels supported by WebRTC decoders. + static constexpr int kMaxNumberOfChannels = 24; + protected: static SpeechType ConvertSpeechType(int16_t type); @@ -184,9 +189,6 @@ class AudioDecoder { int sample_rate_hz, int16_t* decoded, SpeechType* speech_type); - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoder); }; } // namespace webrtc diff --git a/api/audio_codecs/audio_decoder_factory_template.h b/api/audio_codecs/audio_decoder_factory_template.h index 388668d4c6..976f9c62d7 100644 --- a/api/audio_codecs/audio_decoder_factory_template.h +++ b/api/audio_codecs/audio_decoder_factory_template.h @@ -89,8 +89,8 @@ class AudioDecoderFactoryT : public AudioDecoderFactory { // Each decoder type is given as a template argument to the function; it should // be a struct with the following static member functions: // -// // Converts |audio_format| to a ConfigType instance. Returns an empty -// // optional if |audio_format| doesn't correctly specify a decoder of our +// // Converts `audio_format` to a ConfigType instance. Returns an empty +// // optional if `audio_format` doesn't correctly specify a decoder of our // // type. 
// absl::optional SdpToConfig(const SdpAudioFormat& audio_format); // diff --git a/api/audio_codecs/audio_encoder.cc b/api/audio_codecs/audio_encoder.cc index cd4d2000d2..31bb8739f7 100644 --- a/api/audio_codecs/audio_encoder.cc +++ b/api/audio_codecs/audio_encoder.cc @@ -83,7 +83,7 @@ void AudioEncoder::OnReceivedUplinkPacketLossFraction( void AudioEncoder::OnReceivedUplinkRecoverablePacketLossFraction( float uplink_recoverable_packet_loss_fraction) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } void AudioEncoder::OnReceivedTargetAudioBitrate(int target_audio_bitrate_bps) { @@ -110,4 +110,5 @@ ANAStats AudioEncoder::GetANAStats() const { return ANAStats(); } +constexpr int AudioEncoder::kMaxNumberOfChannels; } // namespace webrtc diff --git a/api/audio_codecs/audio_encoder.h b/api/audio_codecs/audio_encoder.h index 92e42cf107..7f5a34214f 100644 --- a/api/audio_codecs/audio_encoder.h +++ b/api/audio_codecs/audio_encoder.h @@ -95,13 +95,13 @@ class AudioEncoder { // This is the main struct for auxiliary encoding information. Each encoded // packet should be accompanied by one EncodedInfo struct, containing the - // total number of |encoded_bytes|, the |encoded_timestamp| and the - // |payload_type|. If the packet contains redundant encodings, the |redundant| + // total number of `encoded_bytes`, the `encoded_timestamp` and the + // `payload_type`. If the packet contains redundant encodings, the `redundant` // vector will be populated with EncodedInfoLeaf structs. Each struct in the // vector represents one encoding; the order of structs in the vector is the // same as the order in which the actual payloads are written to the byte // stream. When EncoderInfoLeaf structs are present in the vector, the main - // struct's |encoded_bytes| will be the sum of all the |encoded_bytes| in the + // struct's `encoded_bytes` will be the sum of all the `encoded_bytes` in the // vector. struct EncodedInfo : public EncodedInfoLeaf { EncodedInfo(); @@ -143,7 +143,7 @@ class AudioEncoder { // Accepts one 10 ms block of input audio (i.e., SampleRateHz() / 100 * // NumChannels() samples). Multi-channel audio must be sample-interleaved. - // The encoder appends zero or more bytes of output to |encoded| and returns + // The encoder appends zero or more bytes of output to `encoded` and returns // additional encoding information. Encode() checks some preconditions, calls // EncodeImpl() which does the actual work, and then checks some // postconditions. @@ -205,7 +205,7 @@ class AudioEncoder { virtual void DisableAudioNetworkAdaptor(); // Provides uplink packet loss fraction to this encoder to allow it to adapt. - // |uplink_packet_loss_fraction| is in the range [0.0, 1.0]. + // `uplink_packet_loss_fraction` is in the range [0.0, 1.0]. virtual void OnReceivedUplinkPacketLossFraction( float uplink_packet_loss_fraction); @@ -246,6 +246,9 @@ class AudioEncoder { virtual absl::optional> GetFrameLengthRange() const = 0; + // The maximum number of audio channels supported by WebRTC encoders. + static constexpr int kMaxNumberOfChannels = 24; + protected: // Subclasses implement this to perform the actual encoding. Called by // Encode(). 
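// ---------------------------------------------------------------------------
// Editor's note, not part of this CL: the factory templates described in
// audio_decoder_factory_template.h above (and audio_encoder_factory_template.h
// below) accept any codec trait struct that provides the listed static
// members. A hedged sketch of a minimal decoder trait (all names
// hypothetical):
//
//   struct MyDecoder {
//     struct Config {
//       bool IsOk() const { return true; }
//     };
//     static absl::optional<Config> SdpToConfig(const SdpAudioFormat& format) {
//       if (absl::EqualsIgnoreCase(format.name, "mycodec")) {
//         return Config();
//       }
//       return absl::nullopt;
//     }
//     static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
//     static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
//         const Config& config,
//         absl::optional<AudioCodecPairId> codec_pair_id);
//   };
//
//   // Registered as: CreateAudioDecoderFactory<MyDecoder>();
// ---------------------------------------------------------------------------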
diff --git a/api/audio_codecs/audio_encoder_factory_template.h b/api/audio_codecs/audio_encoder_factory_template.h index cdc7defd25..4dc0672c46 100644 --- a/api/audio_codecs/audio_encoder_factory_template.h +++ b/api/audio_codecs/audio_encoder_factory_template.h @@ -103,8 +103,8 @@ class AudioEncoderFactoryT : public AudioEncoderFactory { // Each encoder type is given as a template argument to the function; it should // be a struct with the following static member functions: // -// // Converts |audio_format| to a ConfigType instance. Returns an empty -// // optional if |audio_format| doesn't correctly specify an encoder of our +// // Converts `audio_format` to a ConfigType instance. Returns an empty +// // optional if `audio_format` doesn't correctly specify an encoder of our // // type. // absl::optional SdpToConfig(const SdpAudioFormat& audio_format); // diff --git a/api/audio_codecs/audio_format.h b/api/audio_codecs/audio_format.h index 9f61729fdb..0cf67799b8 100644 --- a/api/audio_codecs/audio_format.h +++ b/api/audio_codecs/audio_format.h @@ -39,7 +39,7 @@ struct RTC_EXPORT SdpAudioFormat { Parameters&& param); ~SdpAudioFormat(); - // Returns true if this format is compatible with |o|. In SDP terminology: + // Returns true if this format is compatible with `o`. In SDP terminology: // would it represent the same codec between an offer and an answer? As // opposed to operator==, this method disregards codec parameters. bool Matches(const SdpAudioFormat& o) const; diff --git a/api/audio_codecs/g711/audio_decoder_g711.cc b/api/audio_codecs/g711/audio_decoder_g711.cc index 57e3741bef..f3d3378cf2 100644 --- a/api/audio_codecs/g711/audio_decoder_g711.cc +++ b/api/audio_codecs/g711/audio_decoder_g711.cc @@ -28,7 +28,10 @@ absl::optional AudioDecoderG711::SdpToConfig( Config config; config.type = is_pcmu ? 
Config::Type::kPcmU : Config::Type::kPcmA; config.num_channels = rtc::dchecked_cast(format.num_channels); - RTC_DCHECK(config.IsOk()); + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return absl::nullopt; + } return config; } else { return absl::nullopt; @@ -45,13 +48,17 @@ void AudioDecoderG711::AppendSupportedDecoders( std::unique_ptr AudioDecoderG711::MakeAudioDecoder( const Config& config, absl::optional /*codec_pair_id*/) { - RTC_DCHECK(config.IsOk()); + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } switch (config.type) { case Config::Type::kPcmU: return std::make_unique(config.num_channels); case Config::Type::kPcmA: return std::make_unique(config.num_channels); default: + RTC_DCHECK_NOTREACHED(); return nullptr; } } diff --git a/api/audio_codecs/g711/audio_decoder_g711.h b/api/audio_codecs/g711/audio_decoder_g711.h index ccd1ee0480..18c15a8d60 100644 --- a/api/audio_codecs/g711/audio_decoder_g711.h +++ b/api/audio_codecs/g711/audio_decoder_g711.h @@ -28,7 +28,9 @@ struct RTC_EXPORT AudioDecoderG711 { struct Config { enum class Type { kPcmU, kPcmA }; bool IsOk() const { - return (type == Type::kPcmU || type == Type::kPcmA) && num_channels >= 1; + return (type == Type::kPcmU || type == Type::kPcmA) && + num_channels >= 1 && + num_channels <= AudioDecoder::kMaxNumberOfChannels; } Type type; int num_channels; diff --git a/api/audio_codecs/g711/audio_encoder_g711.cc b/api/audio_codecs/g711/audio_encoder_g711.cc index ab95ad45d5..4c1ce0f8e6 100644 --- a/api/audio_codecs/g711/audio_encoder_g711.cc +++ b/api/audio_codecs/g711/audio_encoder_g711.cc @@ -38,7 +38,10 @@ absl::optional AudioEncoderG711::SdpToConfig( config.frame_size_ms = rtc::SafeClamp(10 * (*ptime / 10), 10, 60); } } - RTC_DCHECK(config.IsOk()); + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return absl::nullopt; + } return config; } else { return absl::nullopt; @@ -62,7 +65,10 @@ std::unique_ptr AudioEncoderG711::MakeAudioEncoder( const Config& config, int payload_type, absl::optional /*codec_pair_id*/) { - RTC_DCHECK(config.IsOk()); + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } switch (config.type) { case Config::Type::kPcmU: { AudioEncoderPcmU::Config impl_config; @@ -79,6 +85,7 @@ std::unique_ptr AudioEncoderG711::MakeAudioEncoder( return std::make_unique(impl_config); } default: { + RTC_DCHECK_NOTREACHED(); return nullptr; } } diff --git a/api/audio_codecs/g711/audio_encoder_g711.h b/api/audio_codecs/g711/audio_encoder_g711.h index 23ae18b5e3..29fe38f1a0 100644 --- a/api/audio_codecs/g711/audio_encoder_g711.h +++ b/api/audio_codecs/g711/audio_encoder_g711.h @@ -29,7 +29,9 @@ struct RTC_EXPORT AudioEncoderG711 { enum class Type { kPcmU, kPcmA }; bool IsOk() const { return (type == Type::kPcmU || type == Type::kPcmA) && - frame_size_ms > 0 && frame_size_ms % 10 == 0 && num_channels >= 1; + frame_size_ms > 0 && frame_size_ms % 10 == 0 && + num_channels >= 1 && + num_channels <= AudioEncoder::kMaxNumberOfChannels; } Type type = Type::kPcmU; int num_channels = 1; diff --git a/api/audio_codecs/g722/audio_decoder_g722.cc b/api/audio_codecs/g722/audio_decoder_g722.cc index 29b6d5da0a..0049e5ab32 100644 --- a/api/audio_codecs/g722/audio_decoder_g722.cc +++ b/api/audio_codecs/g722/audio_decoder_g722.cc @@ -21,12 +21,12 @@ namespace webrtc { absl::optional AudioDecoderG722::SdpToConfig( const SdpAudioFormat& format) { - return absl::EqualsIgnoreCase(format.name, "G722") && - format.clockrate_hz == 8000 && - (format.num_channels == 1 || format.num_channels == 2) - ? 
absl::optional( - Config{rtc::dchecked_cast(format.num_channels)}) - : absl::nullopt; + if (absl::EqualsIgnoreCase(format.name, "G722") && + format.clockrate_hz == 8000 && + (format.num_channels == 1 || format.num_channels == 2)) { + return Config{rtc::dchecked_cast(format.num_channels)}; + } + return absl::nullopt; } void AudioDecoderG722::AppendSupportedDecoders( @@ -37,12 +37,17 @@ void AudioDecoderG722::AppendSupportedDecoders( std::unique_ptr AudioDecoderG722::MakeAudioDecoder( Config config, absl::optional /*codec_pair_id*/) { + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } switch (config.num_channels) { case 1: return std::make_unique(); case 2: return std::make_unique(); default: + RTC_DCHECK_NOTREACHED(); return nullptr; } } diff --git a/api/audio_codecs/g722/audio_encoder_g722.cc b/api/audio_codecs/g722/audio_encoder_g722.cc index 12c1746eb7..66cf9e19d6 100644 --- a/api/audio_codecs/g722/audio_encoder_g722.cc +++ b/api/audio_codecs/g722/audio_encoder_g722.cc @@ -38,8 +38,11 @@ absl::optional AudioEncoderG722::SdpToConfig( config.frame_size_ms = rtc::SafeClamp(whole_packets * 10, 10, 60); } } - return config.IsOk() ? absl::optional(config) - : absl::nullopt; + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return absl::nullopt; + } + return config; } void AudioEncoderG722::AppendSupportedEncoders( @@ -60,7 +63,10 @@ std::unique_ptr AudioEncoderG722::MakeAudioEncoder( const AudioEncoderG722Config& config, int payload_type, absl::optional /*codec_pair_id*/) { - RTC_DCHECK(config.IsOk()); + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } return std::make_unique(config, payload_type); } diff --git a/api/audio_codecs/g722/audio_encoder_g722_config.h b/api/audio_codecs/g722/audio_encoder_g722_config.h index 287898589f..f85eef00a8 100644 --- a/api/audio_codecs/g722/audio_encoder_g722_config.h +++ b/api/audio_codecs/g722/audio_encoder_g722_config.h @@ -15,7 +15,8 @@ namespace webrtc { struct AudioEncoderG722Config { bool IsOk() const { - return frame_size_ms > 0 && frame_size_ms % 10 == 0 && num_channels >= 1; + return frame_size_ms > 0 && frame_size_ms % 10 == 0 && num_channels >= 1 && + num_channels <= AudioEncoder::kMaxNumberOfChannels; } int frame_size_ms = 20; int num_channels = 1; diff --git a/api/audio_codecs/ilbc/audio_decoder_ilbc.cc b/api/audio_codecs/ilbc/audio_decoder_ilbc.cc index d0aae9044e..237cef23c1 100644 --- a/api/audio_codecs/ilbc/audio_decoder_ilbc.cc +++ b/api/audio_codecs/ilbc/audio_decoder_ilbc.cc @@ -20,10 +20,11 @@ namespace webrtc { absl::optional AudioDecoderIlbc::SdpToConfig( const SdpAudioFormat& format) { - return absl::EqualsIgnoreCase(format.name, "ILBC") && - format.clockrate_hz == 8000 && format.num_channels == 1 - ? absl::optional(Config()) - : absl::nullopt; + if (absl::EqualsIgnoreCase(format.name, "ILBC") && + format.clockrate_hz == 8000 && format.num_channels == 1) { + return Config(); + } + return absl::nullopt; } void AudioDecoderIlbc::AppendSupportedDecoders( diff --git a/api/audio_codecs/ilbc/audio_encoder_ilbc.cc b/api/audio_codecs/ilbc/audio_encoder_ilbc.cc index 035b0dc34f..52ba8f6b88 100644 --- a/api/audio_codecs/ilbc/audio_encoder_ilbc.cc +++ b/api/audio_codecs/ilbc/audio_encoder_ilbc.cc @@ -53,8 +53,11 @@ absl::optional AudioEncoderIlbc::SdpToConfig( config.frame_size_ms = rtc::SafeClamp(whole_packets * 10, 20, 60); } } - return config.IsOk() ? 
absl::optional(config) - : absl::nullopt; + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return absl::nullopt; + } + return config; } void AudioEncoderIlbc::AppendSupportedEncoders( @@ -74,7 +77,10 @@ std::unique_ptr AudioEncoderIlbc::MakeAudioEncoder( const AudioEncoderIlbcConfig& config, int payload_type, absl::optional /*codec_pair_id*/) { - RTC_DCHECK(config.IsOk()); + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } return std::make_unique(config, payload_type); } diff --git a/api/audio_codecs/isac/audio_decoder_isac_fix.cc b/api/audio_codecs/isac/audio_decoder_isac_fix.cc index 21d0da37d1..305e15a525 100644 --- a/api/audio_codecs/isac/audio_decoder_isac_fix.cc +++ b/api/audio_codecs/isac/audio_decoder_isac_fix.cc @@ -19,10 +19,11 @@ namespace webrtc { absl::optional AudioDecoderIsacFix::SdpToConfig( const SdpAudioFormat& format) { - return absl::EqualsIgnoreCase(format.name, "ISAC") && - format.clockrate_hz == 16000 && format.num_channels == 1 - ? absl::optional(Config()) - : absl::nullopt; + if (absl::EqualsIgnoreCase(format.name, "ISAC") && + format.clockrate_hz == 16000 && format.num_channels == 1) { + return Config(); + } + return absl::nullopt; } void AudioDecoderIsacFix::AppendSupportedDecoders( diff --git a/api/audio_codecs/isac/audio_decoder_isac_float.cc b/api/audio_codecs/isac/audio_decoder_isac_float.cc index 4efc2ea9a3..683eb6c0ad 100644 --- a/api/audio_codecs/isac/audio_decoder_isac_float.cc +++ b/api/audio_codecs/isac/audio_decoder_isac_float.cc @@ -24,6 +24,10 @@ AudioDecoderIsacFloat::SdpToConfig(const SdpAudioFormat& format) { format.num_channels == 1) { Config config; config.sample_rate_hz = format.clockrate_hz; + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return absl::nullopt; + } return config; } else { return absl::nullopt; @@ -39,9 +43,12 @@ void AudioDecoderIsacFloat::AppendSupportedDecoders( std::unique_ptr AudioDecoderIsacFloat::MakeAudioDecoder( Config config, absl::optional /*codec_pair_id*/) { - RTC_DCHECK(config.IsOk()); AudioDecoderIsacFloatImpl::Config c; c.sample_rate_hz = config.sample_rate_hz; + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } return std::make_unique(c); } diff --git a/api/audio_codecs/isac/audio_encoder_isac_fix.cc b/api/audio_codecs/isac/audio_encoder_isac_fix.cc index 7cf55b9163..b590be1ea3 100644 --- a/api/audio_codecs/isac/audio_encoder_isac_fix.cc +++ b/api/audio_codecs/isac/audio_encoder_isac_fix.cc @@ -30,6 +30,10 @@ absl::optional AudioEncoderIsacFix::SdpToConfig( config.frame_size_ms = 60; } } + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return absl::nullopt; + } return config; } else { return absl::nullopt; @@ -53,11 +57,14 @@ std::unique_ptr AudioEncoderIsacFix::MakeAudioEncoder( AudioEncoderIsacFix::Config config, int payload_type, absl::optional /*codec_pair_id*/) { - RTC_DCHECK(config.IsOk()); AudioEncoderIsacFixImpl::Config c; c.frame_size_ms = config.frame_size_ms; c.bit_rate = config.bit_rate; c.payload_type = payload_type; + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } return std::make_unique(c); } diff --git a/api/audio_codecs/isac/audio_encoder_isac_float.cc b/api/audio_codecs/isac/audio_encoder_isac_float.cc index 6f684c881b..e2afeae84e 100644 --- a/api/audio_codecs/isac/audio_encoder_isac_float.cc +++ b/api/audio_codecs/isac/audio_encoder_isac_float.cc @@ -37,6 +37,10 @@ AudioEncoderIsacFloat::SdpToConfig(const SdpAudioFormat& format) { } } } + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return absl::nullopt; + } 
return config; } else { return absl::nullopt; @@ -65,12 +69,15 @@ std::unique_ptr AudioEncoderIsacFloat::MakeAudioEncoder( const AudioEncoderIsacFloat::Config& config, int payload_type, absl::optional /*codec_pair_id*/) { - RTC_DCHECK(config.IsOk()); AudioEncoderIsacFloatImpl::Config c; c.payload_type = payload_type; c.sample_rate_hz = config.sample_rate_hz; c.frame_size_ms = config.frame_size_ms; c.bit_rate = config.bit_rate; + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } return std::make_unique(c); } diff --git a/api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h b/api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h index 30bc76e354..7350045bf5 100644 --- a/api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h +++ b/api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h @@ -30,7 +30,8 @@ struct AudioDecoderMultiChannelOpusConfig { std::vector channel_mapping; bool IsOk() const { - if (num_channels < 0 || num_streams < 0 || coupled_streams < 0) { + if (num_channels < 1 || num_channels > AudioDecoder::kMaxNumberOfChannels || + num_streams < 0 || coupled_streams < 0) { return false; } if (num_streams < coupled_streams) { diff --git a/api/audio_codecs/opus/audio_decoder_opus.cc b/api/audio_codecs/opus/audio_decoder_opus.cc index 6b4e0d3797..7e0d88b7ad 100644 --- a/api/audio_codecs/opus/audio_decoder_opus.cc +++ b/api/audio_codecs/opus/audio_decoder_opus.cc @@ -51,7 +51,10 @@ absl::optional AudioDecoderOpus::SdpToConfig( num_channels) { Config config; config.num_channels = *num_channels; - RTC_DCHECK(config.IsOk()); + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return absl::nullopt; + } return config; } else { return absl::nullopt; @@ -71,7 +74,10 @@ void AudioDecoderOpus::AppendSupportedDecoders( std::unique_ptr AudioDecoderOpus::MakeAudioDecoder( Config config, absl::optional /*codec_pair_id*/) { - RTC_DCHECK(config.IsOk()); + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } return std::make_unique(config.num_channels, config.sample_rate_hz); } diff --git a/api/audio_codecs/opus/audio_encoder_opus.cc b/api/audio_codecs/opus/audio_encoder_opus.cc index 36d82b3eff..6d950c5e74 100644 --- a/api/audio_codecs/opus/audio_encoder_opus.cc +++ b/api/audio_codecs/opus/audio_encoder_opus.cc @@ -33,6 +33,10 @@ std::unique_ptr AudioEncoderOpus::MakeAudioEncoder( const AudioEncoderOpusConfig& config, int payload_type, absl::optional /*codec_pair_id*/) { + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } return AudioEncoderOpusImpl::MakeAudioEncoder(config, payload_type); } diff --git a/api/audio_codecs/opus/audio_encoder_opus_config.h b/api/audio_codecs/opus/audio_encoder_opus_config.h index 3c412b7a30..d5d7256c70 100644 --- a/api/audio_codecs/opus/audio_encoder_opus_config.h +++ b/api/audio_codecs/opus/audio_encoder_opus_config.h @@ -49,10 +49,10 @@ struct RTC_EXPORT AudioEncoderOpusConfig { bool cbr_enabled; int max_playback_rate_hz; - // |complexity| is used when the bitrate goes above - // |complexity_threshold_bps| + |complexity_threshold_window_bps|; - // |low_rate_complexity| is used when the bitrate falls below - // |complexity_threshold_bps| - |complexity_threshold_window_bps|. In the + // `complexity` is used when the bitrate goes above + // `complexity_threshold_bps` + `complexity_threshold_window_bps`; + // `low_rate_complexity` is used when the bitrate falls below + // `complexity_threshold_bps` - `complexity_threshold_window_bps`. 
In the // interval in the middle, we keep using the most recent of the two // complexity settings. int complexity; diff --git a/api/audio_options.cc b/api/audio_options.cc index 6832bbe294..fad35cb881 100644 --- a/api/audio_options.cc +++ b/api/audio_options.cc @@ -55,16 +55,11 @@ void AudioOptions::SetAll(const AudioOptions& change) { SetFrom(&audio_jitter_buffer_enable_rtx_handling, change.audio_jitter_buffer_enable_rtx_handling); SetFrom(&typing_detection, change.typing_detection); - SetFrom(&experimental_agc, change.experimental_agc); - SetFrom(&experimental_ns, change.experimental_ns); SetFrom(&residual_echo_detector, change.residual_echo_detector); - SetFrom(&tx_agc_target_dbov, change.tx_agc_target_dbov); - SetFrom(&tx_agc_digital_compression_gain, - change.tx_agc_digital_compression_gain); - SetFrom(&tx_agc_limiter, change.tx_agc_limiter); SetFrom(&combined_audio_video_bwe, change.combined_audio_video_bwe); SetFrom(&audio_network_adaptor, change.audio_network_adaptor); SetFrom(&audio_network_adaptor_config, change.audio_network_adaptor_config); + SetFrom(&init_recording_on_send, change.init_recording_on_send); } bool AudioOptions::operator==(const AudioOptions& o) const { @@ -84,15 +79,11 @@ bool AudioOptions::operator==(const AudioOptions& o) const { audio_jitter_buffer_enable_rtx_handling == o.audio_jitter_buffer_enable_rtx_handling && typing_detection == o.typing_detection && - experimental_agc == o.experimental_agc && - experimental_ns == o.experimental_ns && residual_echo_detector == o.residual_echo_detector && - tx_agc_target_dbov == o.tx_agc_target_dbov && - tx_agc_digital_compression_gain == o.tx_agc_digital_compression_gain && - tx_agc_limiter == o.tx_agc_limiter && combined_audio_video_bwe == o.combined_audio_video_bwe && audio_network_adaptor == o.audio_network_adaptor && - audio_network_adaptor_config == o.audio_network_adaptor_config; + audio_network_adaptor_config == o.audio_network_adaptor_config && + init_recording_on_send == o.init_recording_on_send; } std::string AudioOptions::ToString() const { @@ -117,15 +108,10 @@ std::string AudioOptions::ToString() const { ToStringIfSet(&result, "audio_jitter_buffer_enable_rtx_handling", audio_jitter_buffer_enable_rtx_handling); ToStringIfSet(&result, "typing", typing_detection); - ToStringIfSet(&result, "experimental_agc", experimental_agc); - ToStringIfSet(&result, "experimental_ns", experimental_ns); ToStringIfSet(&result, "residual_echo_detector", residual_echo_detector); - ToStringIfSet(&result, "tx_agc_target_dbov", tx_agc_target_dbov); - ToStringIfSet(&result, "tx_agc_digital_compression_gain", - tx_agc_digital_compression_gain); - ToStringIfSet(&result, "tx_agc_limiter", tx_agc_limiter); ToStringIfSet(&result, "combined_audio_video_bwe", combined_audio_video_bwe); ToStringIfSet(&result, "audio_network_adaptor", audio_network_adaptor); + ToStringIfSet(&result, "init_recording_on_send", init_recording_on_send); result << "}"; return result.str(); } diff --git a/api/audio_options.h b/api/audio_options.h index 1b0d1ad0bd..3fcc38d83f 100644 --- a/api/audio_options.h +++ b/api/audio_options.h @@ -60,15 +60,14 @@ struct RTC_EXPORT AudioOptions { absl::optional audio_jitter_buffer_min_delay_ms; // Audio receiver jitter buffer (NetEq) should handle retransmitted packets. absl::optional audio_jitter_buffer_enable_rtx_handling; + // Deprecated. + // TODO(bugs.webrtc.org/11226): Remove. // Audio processing to detect typing. 
absl::optional typing_detection; - absl::optional experimental_agc; - absl::optional experimental_ns; - // Note that tx_agc_* only applies to non-experimental AGC. + // TODO(bugs.webrtc.org/11539): Deprecated, replaced by + // webrtc::CreateEchoDetector() and injection when creating the audio + // processing module. absl::optional residual_echo_detector; - absl::optional tx_agc_target_dbov; - absl::optional tx_agc_digital_compression_gain; - absl::optional tx_agc_limiter; // Enable combined audio+bandwidth BWE. // TODO(pthatcher): This flag is set from the // "googCombinedAudioVideoBwe", but not used anywhere. So delete it, @@ -80,6 +79,10 @@ struct RTC_EXPORT AudioOptions { absl::optional audio_network_adaptor; // Config string for audio network adaptor. absl::optional audio_network_adaptor_config; + // Pre-initialize the ADM for recording when starting to send. Default to + // true. + // TODO(webrtc:13566): Remove this option. See issue for details. + absl::optional init_recording_on_send; }; } // namespace cricket diff --git a/api/call/bitrate_allocation.h b/api/call/bitrate_allocation.h index 13c7f74b3a..4b4e5e7ae1 100644 --- a/api/call/bitrate_allocation.h +++ b/api/call/bitrate_allocation.h @@ -32,7 +32,7 @@ struct BitrateAllocationUpdate { double packet_loss_ratio = 0; // Predicted round trip time. TimeDelta round_trip_time = TimeDelta::PlusInfinity(); - // |bwe_period| is deprecated, use |stable_target_bitrate| allocation instead. + // `bwe_period` is deprecated, use `stable_target_bitrate` allocation instead. TimeDelta bwe_period = TimeDelta::PlusInfinity(); // Congestion window pushback bitrate reduction fraction. Used in // VideoStreamEncoder to reduce the bitrate by the given fraction diff --git a/api/candidate.cc b/api/candidate.cc index d5fe3a0672..4d17256c2e 100644 --- a/api/candidate.cc +++ b/api/candidate.cc @@ -27,14 +27,14 @@ Candidate::Candidate() network_cost_(0) {} Candidate::Candidate(int component, - const std::string& protocol, + absl::string_view protocol, const rtc::SocketAddress& address, uint32_t priority, - const std::string& username, - const std::string& password, - const std::string& type, + absl::string_view username, + absl::string_view password, + absl::string_view type, uint32_t generation, - const std::string& foundation, + absl::string_view foundation, uint16_t network_id, uint16_t network_cost) : id_(rtc::CreateRandomString(8)), @@ -92,7 +92,7 @@ uint32_t Candidate::GetPriority(uint32_t type_preference, // (2^8)*(local preference) + // (2^0)*(256 - component ID) - // |local_preference| length is 2 bytes, 0-65535 inclusive. + // `local_preference` length is 2 bytes, 0-65535 inclusive. // In our implemenation we will partion local_preference into // 0 1 // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 @@ -101,7 +101,9 @@ uint32_t Candidate::GetPriority(uint32_t type_preference, // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // NIC Type - Type of the network adapter e.g. 3G/Wifi/Wired. // Addr Pref - Address preference value as per RFC 3484. - // local preference = (NIC Type << 8 | Addr_Pref) - relay preference. + // local preference = (NIC Type << 8 | Addr_Pref) + relay preference. + // The relay preference is based on the number of TURN servers, the + // first TURN server gets the highest preference. 
int addr_pref = IPAddressPrecedence(address_.ipaddr()); int local_preference = @@ -153,4 +155,11 @@ Candidate Candidate::ToSanitizedCopy(bool use_hostname_address, return copy; } +void Candidate::Assign(std::string& s, absl::string_view view) { + // Assigning via a temporary object, like s = std::string(view), results in + // binary size bloat. To avoid that, extract pointer and size from the + // string view, and use std::string::assign method. + s.assign(view.data(), view.size()); +} + } // namespace cricket diff --git a/api/candidate.h b/api/candidate.h index 745205575b..ecfdee3fcb 100644 --- a/api/candidate.h +++ b/api/candidate.h @@ -17,6 +17,7 @@ #include #include +#include "absl/strings/string_view.h" #include "rtc_base/checks.h" #include "rtc_base/network_constants.h" #include "rtc_base/socket_address.h" @@ -33,32 +34,32 @@ class RTC_EXPORT Candidate { // TODO(pthatcher): Match the ordering and param list as per RFC 5245 // candidate-attribute syntax. http://tools.ietf.org/html/rfc5245#section-15.1 Candidate(int component, - const std::string& protocol, + absl::string_view protocol, const rtc::SocketAddress& address, uint32_t priority, - const std::string& username, - const std::string& password, - const std::string& type, + absl::string_view username, + absl::string_view password, + absl::string_view type, uint32_t generation, - const std::string& foundation, + absl::string_view foundation, uint16_t network_id = 0, uint16_t network_cost = 0); Candidate(const Candidate&); ~Candidate(); const std::string& id() const { return id_; } - void set_id(const std::string& id) { id_ = id; } + void set_id(absl::string_view id) { Assign(id_, id); } int component() const { return component_; } void set_component(int component) { component_ = component; } const std::string& protocol() const { return protocol_; } - void set_protocol(const std::string& protocol) { protocol_ = protocol; } + void set_protocol(absl::string_view protocol) { Assign(protocol_, protocol); } // The protocol used to talk to relay. const std::string& relay_protocol() const { return relay_protocol_; } - void set_relay_protocol(const std::string& protocol) { - relay_protocol_ = protocol; + void set_relay_protocol(absl::string_view protocol) { + Assign(relay_protocol_, protocol); } const rtc::SocketAddress& address() const { return address_; } @@ -90,17 +91,17 @@ class RTC_EXPORT Candidate { // TODO(honghaiz): Change to usernameFragment or ufrag. 
const std::string& username() const { return username_; } - void set_username(const std::string& username) { username_ = username; } + void set_username(absl::string_view username) { Assign(username_, username); } const std::string& password() const { return password_; } - void set_password(const std::string& password) { password_ = password; } + void set_password(absl::string_view password) { Assign(password_, password); } const std::string& type() const { return type_; } - void set_type(const std::string& type) { type_ = type; } + void set_type(absl::string_view type) { Assign(type_, type); } const std::string& network_name() const { return network_name_; } - void set_network_name(const std::string& network_name) { - network_name_ = network_name; + void set_network_name(absl::string_view network_name) { + Assign(network_name_, network_name); } rtc::AdapterType network_type() const { return network_type_; } @@ -112,7 +113,7 @@ class RTC_EXPORT Candidate { uint32_t generation() const { return generation_; } void set_generation(uint32_t generation) { generation_ = generation; } - // |network_cost| measures the cost/penalty of using this candidate. A network + // `network_cost` measures the cost/penalty of using this candidate. A network // cost of 0 indicates this candidate can be used freely. A value of // rtc::kNetworkCostMax indicates it should be used only as the last resort. void set_network_cost(uint16_t network_cost) { @@ -126,8 +127,8 @@ class RTC_EXPORT Candidate { void set_network_id(uint16_t network_id) { network_id_ = network_id; } const std::string& foundation() const { return foundation_; } - void set_foundation(const std::string& foundation) { - foundation_ = foundation; + void set_foundation(absl::string_view foundation) { + Assign(foundation_, foundation); } const rtc::SocketAddress& related_address() const { return related_address_; } @@ -135,18 +136,18 @@ class RTC_EXPORT Candidate { related_address_ = related_address; } const std::string& tcptype() const { return tcptype_; } - void set_tcptype(const std::string& tcptype) { tcptype_ = tcptype; } + void set_tcptype(absl::string_view tcptype) { Assign(tcptype_, tcptype); } // The name of the transport channel of this candidate. // TODO(phoglund): remove. const std::string& transport_name() const { return transport_name_; } - void set_transport_name(const std::string& transport_name) { - transport_name_ = transport_name; + void set_transport_name(absl::string_view transport_name) { + Assign(transport_name_, transport_name); } // The URL of the ICE server which this candidate is gathered from. const std::string& url() const { return url_; } - void set_url(const std::string& url) { url_ = url; } + void set_url(absl::string_view url) { Assign(url_, url); } // Determines whether this candidate is equivalent to the given one. bool IsEquivalent(const Candidate& c) const; @@ -167,9 +168,9 @@ class RTC_EXPORT Candidate { bool operator!=(const Candidate& o) const; // Returns a sanitized copy configured by the given booleans. If - // |use_host_address| is true, the returned copy has its IP removed from - // |address()|, which leads |address()| to be a hostname address. If - // |filter_related_address|, the returned copy has its related address reset + // `use_host_address` is true, the returned copy has its IP removed from + // `address()`, which leads `address()` to be a hostname address. If + // `filter_related_address`, the returned copy has its related address reset // to the wildcard address (i.e. 
0.0.0.0 for IPv4 and :: for IPv6). Note that // setting both booleans to false returns an identical copy to the original // candidate. @@ -177,6 +178,10 @@ class RTC_EXPORT Candidate { bool filter_related_address) const; private: + // TODO(bugs.webrtc.org/13220): With C++17, we get a std::string assignment + // operator accepting any object implicitly convertible to std::string_view, + // and then we don't need this workaround. + static void Assign(std::string& s, absl::string_view view); std::string ToStringInternal(bool sensitive) const; std::string id_; diff --git a/api/create_peerconnection_factory.cc b/api/create_peerconnection_factory.cc index 008fce3e80..c41b6d6fb2 100644 --- a/api/create_peerconnection_factory.cc +++ b/api/create_peerconnection_factory.cc @@ -49,6 +49,10 @@ rtc::scoped_refptr CreatePeerConnectionFactory( dependencies.task_queue_factory.get()); dependencies.trials = std::make_unique(); + if (network_thread) { + // TODO(bugs.webrtc.org/13145): Add an rtc::SocketFactory* argument. + dependencies.socket_factory = network_thread->socketserver(); + } cricket::MediaEngineDependencies media_dependencies; media_dependencies.task_queue_factory = dependencies.task_queue_factory.get(); media_dependencies.adm = std::move(default_adm); diff --git a/api/crypto/crypto_options.cc b/api/crypto/crypto_options.cc index f47e844596..22c5dd464b 100644 --- a/api/crypto/crypto_options.cc +++ b/api/crypto/crypto_options.cc @@ -32,24 +32,24 @@ CryptoOptions CryptoOptions::NoGcm() { std::vector CryptoOptions::GetSupportedDtlsSrtpCryptoSuites() const { std::vector crypto_suites; - // Note: SRTP_AES128_CM_SHA1_80 is what is required to be supported (by - // draft-ietf-rtcweb-security-arch), but SRTP_AES128_CM_SHA1_32 is allowed as + // Note: kSrtpAes128CmSha1_80 is what is required to be supported (by + // draft-ietf-rtcweb-security-arch), but kSrtpAes128CmSha1_32 is allowed as // well, and saves a few bytes per packet if it ends up selected. // As the cipher suite is potentially insecure, it will only be used if // enabled by both peers. if (srtp.enable_aes128_sha1_32_crypto_cipher) { - crypto_suites.push_back(rtc::SRTP_AES128_CM_SHA1_32); + crypto_suites.push_back(rtc::kSrtpAes128CmSha1_32); } if (srtp.enable_aes128_sha1_80_crypto_cipher) { - crypto_suites.push_back(rtc::SRTP_AES128_CM_SHA1_80); + crypto_suites.push_back(rtc::kSrtpAes128CmSha1_80); } // Note: GCM cipher suites are not the top choice since they increase the // packet size. In order to negotiate them the other side must not support - // SRTP_AES128_CM_SHA1_80. + // kSrtpAes128CmSha1_80. if (srtp.enable_gcm_crypto_suites) { - crypto_suites.push_back(rtc::SRTP_AEAD_AES_256_GCM); - crypto_suites.push_back(rtc::SRTP_AEAD_AES_128_GCM); + crypto_suites.push_back(rtc::kSrtpAeadAes256Gcm); + crypto_suites.push_back(rtc::kSrtpAeadAes128Gcm); } RTC_CHECK(!crypto_suites.empty()); return crypto_suites; diff --git a/api/crypto/crypto_options.h b/api/crypto/crypto_options.h index 5f6cea6c82..317995134a 100644 --- a/api/crypto/crypto_options.h +++ b/api/crypto/crypto_options.h @@ -44,7 +44,7 @@ struct RTC_EXPORT CryptoOptions { bool enable_gcm_crypto_suites = false; // If set to true, the (potentially insecure) crypto cipher - // SRTP_AES128_CM_SHA1_32 will be included in the list of supported ciphers + // kSrtpAes128CmSha1_32 will be included in the list of supported ciphers // during negotiation. It will only be used if both peers support it and no // other ciphers get preferred. 
bool enable_aes128_sha1_32_crypto_cipher = false; diff --git a/api/data_channel_interface.cc b/api/data_channel_interface.cc index d299cedf45..bddb9d1b0a 100644 --- a/api/data_channel_interface.cc +++ b/api/data_channel_interface.cc @@ -40,4 +40,8 @@ bool DataChannelInterface::negotiated() const { return false; } +uint64_t DataChannelInterface::MaxSendQueueSize() { + return 16 * 1024 * 1024; // 16 MiB +} + } // namespace webrtc diff --git a/api/data_channel_interface.h b/api/data_channel_interface.h index 56bb6c98fb..4f74918ff9 100644 --- a/api/data_channel_interface.h +++ b/api/data_channel_interface.h @@ -42,14 +42,14 @@ struct DataChannelInit { // The max period of time in milliseconds in which retransmissions will be // sent. After this time, no more retransmissions will be sent. // - // Cannot be set along with |maxRetransmits|. - // This is called |maxPacketLifeTime| in the WebRTC JS API. + // Cannot be set along with `maxRetransmits`. + // This is called `maxPacketLifeTime` in the WebRTC JS API. // Negative values are ignored, and positive values are clamped to [0-65535] absl::optional maxRetransmitTime; // The max number of retransmissions. // - // Cannot be set along with |maxRetransmitTime|. + // Cannot be set along with `maxRetransmitTime`. // Negative values are ignored, and positive values are clamped to [0-65535] absl::optional maxRetransmits; @@ -57,7 +57,7 @@ struct DataChannelInit { std::string protocol; // True if the channel has been externally negotiated and we do not send an - // in-band signalling in the form of an "open" message. If this is true, |id| + // in-band signalling in the form of an "open" message. If this is true, `id` // below must be set; otherwise it should be unset and will be negotiated // in-band. bool negotiated = false; @@ -70,7 +70,7 @@ struct DataChannelInit { }; // At the JavaScript level, data can be passed in as a string or a blob, so -// this structure's |binary| flag tells whether the data should be interpreted +// this structure's `binary` flag tells whether the data should be interpreted // as binary or text. struct DataBuffer { DataBuffer(const rtc::CopyOnWriteBuffer& data, bool binary) @@ -174,22 +174,25 @@ class RTC_EXPORT DataChannelInterface : public rtc::RefCountInterface { // Returns the number of bytes of application data (UTF-8 text and binary // data) that have been queued using Send but have not yet been processed at // the SCTP level. See comment above Send below. + // Values are less or equal to MaxSendQueueSize(). virtual uint64_t buffered_amount() const = 0; // Begins the graceful data channel closing procedure. See: // https://tools.ietf.org/html/draft-ietf-rtcweb-data-channel-13#section-6.7 virtual void Close() = 0; - // Sends |data| to the remote peer. If the data can't be sent at the SCTP + // Sends `data` to the remote peer. If the data can't be sent at the SCTP // level (due to congestion control), it's buffered at the data channel level, - // up to a maximum of 16MB. If Send is called while this buffer is full, the - // data channel will be closed abruptly. - // - // So, it's important to use buffered_amount() and OnBufferedAmountChange to - // ensure the data channel is used efficiently but without filling this - // buffer. + // up to a maximum of MaxSendQueueSize(). + // Returns false if the data channel is not in open state or if the send + // buffer is full. + // TODO(webrtc:13289): Return an RTCError with information about the failure. 
virtual bool Send(const DataBuffer& buffer) = 0; + // Amount of bytes that can be queued for sending on the data channel. + // Those are bytes that have not yet been processed at the SCTP level. + static uint64_t MaxSendQueueSize(); + protected: ~DataChannelInterface() override = default; }; diff --git a/api/dtls_transport_interface.cc b/api/dtls_transport_interface.cc index a68ff8feb0..faebc0972f 100644 --- a/api/dtls_transport_interface.cc +++ b/api/dtls_transport_interface.cc @@ -20,11 +20,27 @@ DtlsTransportInformation::DtlsTransportInformation(DtlsTransportState state) DtlsTransportInformation::DtlsTransportInformation( DtlsTransportState state, + absl::optional role, absl::optional tls_version, absl::optional ssl_cipher_suite, absl::optional srtp_cipher_suite, std::unique_ptr remote_ssl_certificates) : state_(state), + role_(role), + tls_version_(tls_version), + ssl_cipher_suite_(ssl_cipher_suite), + srtp_cipher_suite_(srtp_cipher_suite), + remote_ssl_certificates_(std::move(remote_ssl_certificates)) {} + +// Deprecated version +DtlsTransportInformation::DtlsTransportInformation( + DtlsTransportState state, + absl::optional tls_version, + absl::optional ssl_cipher_suite, + absl::optional srtp_cipher_suite, + std::unique_ptr remote_ssl_certificates) + : state_(state), + role_(absl::nullopt), tls_version_(tls_version), ssl_cipher_suite_(ssl_cipher_suite), srtp_cipher_suite_(srtp_cipher_suite), @@ -33,6 +49,7 @@ DtlsTransportInformation::DtlsTransportInformation( DtlsTransportInformation::DtlsTransportInformation( const DtlsTransportInformation& c) : state_(c.state()), + role_(c.role_), tls_version_(c.tls_version_), ssl_cipher_suite_(c.ssl_cipher_suite_), srtp_cipher_suite_(c.srtp_cipher_suite_), @@ -43,6 +60,7 @@ DtlsTransportInformation::DtlsTransportInformation( DtlsTransportInformation& DtlsTransportInformation::operator=( const DtlsTransportInformation& c) { state_ = c.state(); + role_ = c.role_; tls_version_ = c.tls_version_; ssl_cipher_suite_ = c.ssl_cipher_suite_; srtp_cipher_suite_ = c.srtp_cipher_suite_; diff --git a/api/dtls_transport_interface.h b/api/dtls_transport_interface.h index 86715b0400..7b0151249c 100644 --- a/api/dtls_transport_interface.h +++ b/api/dtls_transport_interface.h @@ -36,6 +36,11 @@ enum class DtlsTransportState { kNumValues }; +enum class DtlsTransportTlsRole { + kServer, // Other end sends CLIENT_HELLO + kClient // This end sends CLIENT_HELLO +}; + // This object gives snapshot information about the changeable state of a // DTLSTransport. 
class RTC_EXPORT DtlsTransportInformation { @@ -44,10 +49,19 @@ class RTC_EXPORT DtlsTransportInformation { explicit DtlsTransportInformation(DtlsTransportState state); DtlsTransportInformation( DtlsTransportState state, + absl::optional role, absl::optional tls_version, absl::optional ssl_cipher_suite, absl::optional srtp_cipher_suite, std::unique_ptr remote_ssl_certificates); + ABSL_DEPRECATED("Use version with role parameter") + DtlsTransportInformation( + DtlsTransportState state, + absl::optional tls_version, + absl::optional ssl_cipher_suite, + absl::optional srtp_cipher_suite, + std::unique_ptr remote_ssl_certificates); + // Copy and assign DtlsTransportInformation(const DtlsTransportInformation& c); DtlsTransportInformation& operator=(const DtlsTransportInformation& c); @@ -57,6 +71,7 @@ class RTC_EXPORT DtlsTransportInformation { default; DtlsTransportState state() const { return state_; } + absl::optional role() const { return role_; } absl::optional tls_version() const { return tls_version_; } absl::optional ssl_cipher_suite() const { return ssl_cipher_suite_; } absl::optional srtp_cipher_suite() const { return srtp_cipher_suite_; } @@ -67,6 +82,7 @@ class RTC_EXPORT DtlsTransportInformation { private: DtlsTransportState state_; + absl::optional role_; absl::optional tls_version_; absl::optional ssl_cipher_suite_; absl::optional srtp_cipher_suite_; diff --git a/api/dtmf_sender_interface.h b/api/dtmf_sender_interface.h index 7c0e2ce7c3..d63e66bbf7 100644 --- a/api/dtmf_sender_interface.h +++ b/api/dtmf_sender_interface.h @@ -23,8 +23,8 @@ namespace webrtc { // DtmfSender. class DtmfSenderObserverInterface { public: - // Triggered when DTMF |tone| is sent. - // If |tone| is empty that means the DtmfSender has sent out all the given + // Triggered when DTMF `tone` is sent. + // If `tone` is empty that means the DtmfSender has sent out all the given // tones. // The callback includes the state of the tone buffer at the time when // the tone finished playing. @@ -58,7 +58,7 @@ class DtmfSenderInterface : public rtc::RefCountInterface { // able to send packets, and a "telephone-event" codec must be negotiated. virtual bool CanInsertDtmf() = 0; - // Queues a task that sends the DTMF |tones|. The |tones| parameter is treated + // Queues a task that sends the DTMF `tones`. The `tones` parameter is treated // as a series of characters. The characters 0 through 9, A through D, #, and // * generate the associated DTMF tones. The characters a to d are equivalent // to A to D. The character ',' indicates a delay of 2 seconds before @@ -66,18 +66,18 @@ class DtmfSenderInterface : public rtc::RefCountInterface { // // Unrecognized characters are ignored. // - // The |duration| parameter indicates the duration in ms to use for each - // character passed in the |tones| parameter. The duration cannot be more + // The `duration` parameter indicates the duration in ms to use for each + // character passed in the `tones` parameter. The duration cannot be more // than 6000 or less than 70. // - // The |inter_tone_gap| parameter indicates the gap between tones in ms. The - // |inter_tone_gap| must be at least 50 ms but should be as short as + // The `inter_tone_gap` parameter indicates the gap between tones in ms. The + // `inter_tone_gap` must be at least 50 ms but should be as short as // possible. // - // The |comma_delay| parameter indicates the delay after the ',' - // character. 
InsertDtmf specifies |comma_delay| as an argument + // The `comma_delay` parameter indicates the delay after the ',' + // character. InsertDtmf specifies `comma_delay` as an argument // with a default value of 2 seconds as per the WebRTC spec. This parameter - // allows users to comply with legacy WebRTC clients. The |comma_delay| + // allows users to comply with legacy WebRTC clients. The `comma_delay` // must be at least 50 ms. // // If InsertDtmf is called on the same object while an existing task for this diff --git a/api/fec_controller.h b/api/fec_controller.h index 3e5f7bb216..f3d7a8aa26 100644 --- a/api/fec_controller.h +++ b/api/fec_controller.h @@ -38,7 +38,7 @@ class VCMProtectionCallback { // FecController calculates how much of the allocated network // capacity that can be used by an encoder and how much that // is needed for redundant packets such as FEC and NACK. It uses an -// implementation of |VCMProtectionCallback| to set new FEC parameters and get +// implementation of `VCMProtectionCallback` to set new FEC parameters and get // the bitrate currently used for FEC and NACK. // Usage: // Setup by calling SetProtectionMethod and SetEncodingData. diff --git a/api/frame_transformer_interface.h b/api/frame_transformer_interface.h index 2cfe6edb88..de2c612ac0 100644 --- a/api/frame_transformer_interface.h +++ b/api/frame_transformer_interface.h @@ -30,11 +30,22 @@ class TransformableFrameInterface { // method call. virtual rtc::ArrayView GetData() const = 0; - // Copies |data| into the owned frame payload data. + // Copies `data` into the owned frame payload data. virtual void SetData(rtc::ArrayView data) = 0; - virtual uint32_t GetTimestamp() const = 0; + virtual uint8_t GetPayloadType() const = 0; virtual uint32_t GetSsrc() const = 0; + virtual uint32_t GetTimestamp() const = 0; + + enum class Direction { + kUnknown, + kReceiver, + kSender, + }; + // TODO(crbug.com/1250638): Remove this distinction between receiver and + // sender frames to allow received frames to be directly re-transmitted on + // other PeerConnectionss. + virtual Direction GetDirection() const { return Direction::kUnknown; } }; class TransformableVideoFrameInterface : public TransformableFrameInterface { @@ -78,7 +89,7 @@ class TransformedFrameCallback : public rtc::RefCountInterface { // the TransformedFrameCallback interface (see above). class FrameTransformerInterface : public rtc::RefCountInterface { public: - // Transforms |frame| using the implementing class' processing logic. + // Transforms `frame` using the implementing class' processing logic. virtual void Transform( std::unique_ptr transformable_frame) = 0; diff --git a/api/g3doc/index.md b/api/g3doc/index.md index 49637d191a..0c7136c039 100644 --- a/api/g3doc/index.md +++ b/api/g3doc/index.md @@ -15,7 +15,7 @@ some basic concepts, such as: ## Using WebRTC through the PeerConnection class The -[PeerConnectionInterface](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/peer_connection_interface.h?q=webrtc::PeerConnectionInterface) +[PeerConnectionInterface](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/peer_connection_interface.h?q=webrtc::PeerConnectionInterface) class is the recommended way to use the WebRTC library. 
It is closely modeled after the Javascript API documented in the [WebRTC diff --git a/api/ice_transport_factory.h b/api/ice_transport_factory.h index a9fd04ee7b..2268ea5e12 100644 --- a/api/ice_transport_factory.h +++ b/api/ice_transport_factory.h @@ -35,9 +35,9 @@ RTC_EXPORT rtc::scoped_refptr CreateIceTransport( // without using a webrtc::PeerConnection. // The returned object must be accessed and destroyed on the thread that // created it. -// |init.port_allocator()| is required and must outlive the created +// `init.port_allocator()` is required and must outlive the created // IceTransportInterface object. -// |init.async_resolver_factory()| and |init.event_log()| are optional, but if +// `init.async_resolver_factory()` and `init.event_log()` are optional, but if // provided must outlive the created IceTransportInterface object. RTC_EXPORT rtc::scoped_refptr CreateIceTransport( IceTransportInit); diff --git a/api/jsep.h b/api/jsep.h index b56cf1d15b..d2aa57c784 100644 --- a/api/jsep.h +++ b/api/jsep.h @@ -73,7 +73,7 @@ class RTC_EXPORT IceCandidateInterface { // Creates a IceCandidateInterface based on SDP string. // Returns null if the sdp string can't be parsed. -// |error| may be null. +// `error` may be null. RTC_EXPORT IceCandidateInterface* CreateIceCandidate(const std::string& sdp_mid, int sdp_mline_index, const std::string& sdp, @@ -91,7 +91,7 @@ class IceCandidateCollection { public: virtual ~IceCandidateCollection() {} virtual size_t count() const = 0; - // Returns true if an equivalent |candidate| exist in the collection. + // Returns true if an equivalent `candidate` exist in the collection. virtual bool HasCandidate(const IceCandidateInterface* candidate) const = 0; virtual const IceCandidateInterface* at(size_t index) const = 0; }; @@ -158,7 +158,7 @@ class RTC_EXPORT SessionDescriptionInterface { virtual SdpType GetType() const; // kOffer/kPrAnswer/kAnswer - // TODO(steveanton): Remove this in favor of |GetType| that returns SdpType. + // TODO(steveanton): Remove this in favor of `GetType` that returns SdpType. virtual std::string type() const = 0; // Adds the specified candidate to the description. @@ -166,8 +166,8 @@ class RTC_EXPORT SessionDescriptionInterface { // Ownership is not transferred. // // Returns false if the session description does not have a media section - // that corresponds to |candidate.sdp_mid()| or - // |candidate.sdp_mline_index()|. + // that corresponds to `candidate.sdp_mid()` or + // `candidate.sdp_mline_index()`. virtual bool AddCandidate(const IceCandidateInterface* candidate) = 0; // Removes the candidates from the description, if found. @@ -190,7 +190,7 @@ class RTC_EXPORT SessionDescriptionInterface { // Creates a SessionDescriptionInterface based on the SDP string and the type. // Returns null if the sdp string can't be parsed or the type is unsupported. -// |error| may be null. +// `error` may be null. // TODO(steveanton): This function is deprecated. Please use the functions below // which take an SdpType enum instead. Remove this once it is no longer used. RTC_EXPORT SessionDescriptionInterface* CreateSessionDescription( @@ -200,8 +200,8 @@ RTC_EXPORT SessionDescriptionInterface* CreateSessionDescription( // Creates a SessionDescriptionInterface based on the SDP string and the type. // Returns null if the SDP string cannot be parsed. -// If using the signature with |error_out|, details of the parsing error may be -// written to |error_out| if it is not null. 
+// If using the signature with `error_out`, details of the parsing error may be +// written to `error_out` if it is not null. RTC_EXPORT std::unique_ptr CreateSessionDescription(SdpType type, const std::string& sdp); RTC_EXPORT std::unique_ptr @@ -221,7 +221,7 @@ std::unique_ptr CreateSessionDescription( class RTC_EXPORT CreateSessionDescriptionObserver : public rtc::RefCountInterface { public: - // This callback transfers the ownership of the |desc|. + // This callback transfers the ownership of the `desc`. // TODO(deadbeef): Make this take an std::unique_ptr<> to avoid confusion // around ownership. virtual void OnSuccess(SessionDescriptionInterface* desc) = 0; diff --git a/api/jsep_ice_candidate.h b/api/jsep_ice_candidate.h index 1a4247cb07..8f47a102e7 100644 --- a/api/jsep_ice_candidate.h +++ b/api/jsep_ice_candidate.h @@ -22,7 +22,6 @@ #include "api/candidate.h" #include "api/jsep.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/system/rtc_export.h" namespace webrtc { @@ -37,7 +36,7 @@ class RTC_EXPORT JsepIceCandidate : public IceCandidateInterface { JsepIceCandidate(const JsepIceCandidate&) = delete; JsepIceCandidate& operator=(const JsepIceCandidate&) = delete; ~JsepIceCandidate() override; - // |err| may be null. + // `err` may be null. bool Initialize(const std::string& sdp, SdpParseError* err); void SetCandidate(const cricket::Candidate& candidate) { candidate_ = candidate; @@ -64,6 +63,10 @@ class JsepCandidateCollection : public IceCandidateCollection { // Move constructor is defined so that a vector of JsepCandidateCollections // can be resized. JsepCandidateCollection(JsepCandidateCollection&& o); + + JsepCandidateCollection(const JsepCandidateCollection&) = delete; + JsepCandidateCollection& operator=(const JsepCandidateCollection&) = delete; + // Returns a copy of the candidate collection. JsepCandidateCollection Clone() const; size_t count() const override; @@ -80,8 +83,6 @@ class JsepCandidateCollection : public IceCandidateCollection { private: std::vector> candidates_; - - RTC_DISALLOW_COPY_AND_ASSIGN(JsepCandidateCollection); }; } // namespace webrtc diff --git a/api/jsep_session_description.h b/api/jsep_session_description.h index 70ac9398a6..0b65734ea9 100644 --- a/api/jsep_session_description.h +++ b/api/jsep_session_description.h @@ -22,7 +22,6 @@ #include "api/candidate.h" #include "api/jsep.h" #include "api/jsep_ice_candidate.h" -#include "rtc_base/constructor_magic.h" namespace cricket { class SessionDescription; @@ -43,7 +42,10 @@ class JsepSessionDescription : public SessionDescriptionInterface { absl::string_view session_version); virtual ~JsepSessionDescription(); - // Takes ownership of |description|. + JsepSessionDescription(const JsepSessionDescription&) = delete; + JsepSessionDescription& operator=(const JsepSessionDescription&) = delete; + + // Takes ownership of `description`. 
bool Initialize(std::unique_ptr description, const std::string& session_id, const std::string& session_version); @@ -82,8 +84,6 @@ class JsepSessionDescription : public SessionDescriptionInterface { bool GetMediasectionIndex(const IceCandidateInterface* candidate, size_t* index); int GetMediasectionIndex(const cricket::Candidate& candidate); - - RTC_DISALLOW_COPY_AND_ASSIGN(JsepSessionDescription); }; } // namespace webrtc diff --git a/api/media_stream_interface.h b/api/media_stream_interface.h index 8892ee5a0b..7e010289a0 100644 --- a/api/media_stream_interface.h +++ b/api/media_stream_interface.h @@ -28,6 +28,7 @@ #include "api/video/video_frame.h" #include "api/video/video_sink_interface.h" #include "api/video/video_source_interface.h" +#include "api/video_track_source_constraints.h" #include "modules/audio_processing/include/audio_processing_statistics.h" #include "rtc_base/ref_count.h" #include "rtc_base/system/rtc_export.h" @@ -146,8 +147,6 @@ class VideoTrackSourceInterface : public MediaSourceInterface, // Add an encoded video sink to the source and additionally cause // a key frame to be generated from the source. The sink will be // invoked from a decoder queue. - // TODO(bugs.webrtc.org/11114): make pure virtual once downstream project - // adapts. virtual void AddEncodedSink( rtc::VideoSinkInterface* sink) = 0; @@ -155,6 +154,13 @@ class VideoTrackSourceInterface : public MediaSourceInterface, virtual void RemoveEncodedSink( rtc::VideoSinkInterface* sink) = 0; + // Notify about constraints set on the source. The information eventually gets + // routed to attached sinks via VideoSinkInterface<>::OnConstraintsChanged. + // The call is expected to happen on the network thread. + // TODO(crbug/1255737): make pure virtual once downstream project adapts. + virtual void ProcessConstraints( + const webrtc::VideoTrackSourceConstraints& constraints) {} + protected: ~VideoTrackSourceInterface() override = default; }; @@ -197,10 +203,10 @@ class AudioTrackSinkInterface { int sample_rate, size_t number_of_channels, size_t number_of_frames) { - RTC_NOTREACHED() << "This method must be overridden, or not used."; + RTC_DCHECK_NOTREACHED() << "This method must be overridden, or not used."; } - // In this method, |absolute_capture_timestamp_ms|, when available, is + // In this method, `absolute_capture_timestamp_ms`, when available, is // supposed to deliver the timestamp when this audio frame was originally // captured. This timestamp MUST be based on the same clock as // rtc::TimeMillis(). @@ -240,7 +246,7 @@ class RTC_EXPORT AudioSourceInterface : public MediaSourceInterface { // TODO(deadbeef): Makes all the interfaces pure virtual after they're // implemented in chromium. - // Sets the volume of the source. |volume| is in the range of [0, 10]. + // Sets the volume of the source. `volume` is in the range of [0, 10]. // TODO(tommi): This method should be on the track and ideally volume should // be applied in the track in a way that does not affect clones of the track. virtual void SetVolume(double volume) {} @@ -268,7 +274,7 @@ class AudioProcessorInterface : public rtc::RefCountInterface { AudioProcessingStats apm_statistics; }; - // Get audio processor statistics. The |has_remote_tracks| argument should be + // Get audio processor statistics. The `has_remote_tracks` argument should be // set if there are active remote tracks (this would usually be true during // a call). 
If there are no remote tracks
   // some of the stats will not be set by the AudioProcessor, because they
   // only make sense if there is at least one
@@ -327,6 +333,8 @@ class MediaStreamInterface : public rtc::RefCountInterface,
   virtual rtc::scoped_refptr<VideoTrackInterface> FindVideoTrack(
       const std::string& track_id) = 0;
 
+  // Takes ownership of added tracks.
+  // TODO(hta): Should take scoped_refptr rather than raw pointer.
   virtual bool AddTrack(AudioTrackInterface* track) = 0;
   virtual bool AddTrack(VideoTrackInterface* track) = 0;
   virtual bool RemoveTrack(AudioTrackInterface* track) = 0;
diff --git a/api/media_types.cc b/api/media_types.cc
index 3453ce3905..5c7d55b876 100644
--- a/api/media_types.cc
+++ b/api/media_types.cc
@@ -28,7 +28,7 @@ std::string MediaTypeToString(MediaType type) {
       return kMediaTypeData;
     case MEDIA_TYPE_UNSUPPORTED:
       // Unsupported media stores the m= differently.
-      RTC_NOTREACHED();
+      RTC_DCHECK_NOTREACHED();
       return "";
   }
   RTC_CHECK_NOTREACHED();
diff --git a/rtc_tools/loopback_test/run-server.sh b/api/metronome/BUILD.gn
old mode 100755
new mode 100644
similarity index 53%
rename from rtc_tools/loopback_test/run-server.sh
rename to api/metronome/BUILD.gn
index 35c0797c24..3d3d876df0
--- a/rtc_tools/loopback_test/run-server.sh
+++ b/api/metronome/BUILD.gn
@@ -1,15 +1,19 @@
-#!/bin/sh
-#
-# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 #
 # Use of this source code is governed by a BSD-style license
 # that can be found in the LICENSE file in the root of the source
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS. All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
-#
-# This script is used to launch a simple http server for files in the same
-# location as the script itself.
-cd "`dirname \"$0\"`"
-echo "Starting http server in port 8080."
-exec python -m SimpleHTTPServer 8080
+
+import("../../webrtc.gni")
+
+rtc_source_set("metronome") {
+  visibility = [ "*" ]
+  sources = [ "metronome.h" ]
+  deps = [
+    "../../rtc_base/system:rtc_export",
+    "../task_queue",
+    "../units:time_delta",
+  ]
+}
diff --git a/api/metronome/metronome.h b/api/metronome/metronome.h
new file mode 100644
index 0000000000..fc5f350db2
--- /dev/null
+++ b/api/metronome/metronome.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_METRONOME_METRONOME_H_
+#define API_METRONOME_METRONOME_H_
+
+#include "api/task_queue/task_queue_base.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/system/rtc_export.h"
+
+namespace webrtc {
+
+// The Metronome periodically posts OnTick() on the task queue provided by
+// each of its listeners. The metronome can be used as an alternative to using
+// PostDelayedTask on a thread or task queue for coalescing work and reducing
+// the number of idle-wakeups.
+//
+// Listeners can be added and removed from any sequence, but it is illegal to
+// remove a listener from an OnTick invocation.
+//
+// The metronome concept is still under experimentation, and may not be
+// available in all platforms or applications. See https://crbug.com/1253787
+// for more details.
+//
+// Metronome implementations must be thread-safe.
+class RTC_EXPORT Metronome {
+ public:
+  class RTC_EXPORT TickListener {
+   public:
+    virtual ~TickListener() = default;
+
+    // OnTick is run on the task queue provided by OnTickTaskQueue each time
+    // the metronome ticks.
+    virtual void OnTick() = 0;
+
+    // The task queue that OnTick will run on. Must not be null.
+    virtual TaskQueueBase* OnTickTaskQueue() = 0;
+  };
+
+  virtual ~Metronome() = default;
+
+  // Adds a tick listener to the metronome. Once this method has returned,
+  // OnTick will be invoked on each metronome tick. A listener may only be
+  // added to the metronome once.
+  virtual void AddListener(TickListener* listener) = 0;
+
+  // Removes the tick listener from the metronome. Once this method has
+  // returned, OnTick will never be called again. This method must not be
+  // called from within OnTick.
+  virtual void RemoveListener(TickListener* listener) = 0;
+
+  // Returns the current tick period of the metronome.
+  virtual TimeDelta TickPeriod() const = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // API_METRONOME_METRONOME_H_
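The TickListener contract above is easiest to see in use. The following is a minimal sketch, not part of this change: `DecodeScheduler`, `queue_` and `ProcessPendingFrames()` are hypothetical names, while `Metronome`, `TickListener` and `TaskQueueBase` come from the API introduced here.

    // Sketch: a listener that coalesces per-frame work onto metronome ticks
    // instead of posting one delayed task per frame.
    class DecodeScheduler : public webrtc::Metronome::TickListener {
     public:
      DecodeScheduler(webrtc::Metronome* metronome,
                      webrtc::TaskQueueBase* queue)
          : metronome_(metronome), queue_(queue) {
        metronome_->AddListener(this);
      }
      ~DecodeScheduler() override {
        // Legal from any sequence, but never from within OnTick().
        metronome_->RemoveListener(this);
      }

      // Metronome::TickListener implementation.
      void OnTick() override {
        // Runs on `queue_` once per tick, at most TickPeriod() apart.
        ProcessPendingFrames();
      }
      webrtc::TaskQueueBase* OnTickTaskQueue() override { return queue_; }

     private:
      void ProcessPendingFrames() { /* application-specific work */ }

      webrtc::Metronome* const metronome_;
      webrtc::TaskQueueBase* const queue_;
    };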
diff --git a/api/metronome/test/BUILD.gn b/api/metronome/test/BUILD.gn
new file mode 100644
index 0000000000..d25d5a848a
--- /dev/null
+++ b/api/metronome/test/BUILD.gn
@@ -0,0 +1,30 @@
+# Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_library("fake_metronome") {
+  testonly = true
+  sources = [
+    "fake_metronome.cc",
+    "fake_metronome.h",
+  ]
+  deps = [
+    "..:metronome",
+    "../..:priority",
+    "../..:sequence_checker",
+    "../../../rtc_base:macromagic",
+    "../../../rtc_base:rtc_event",
+    "../../../rtc_base:rtc_task_queue",
+    "../../../rtc_base/synchronization:mutex",
+    "../../../rtc_base/task_utils:repeating_task",
+    "../../../rtc_base/task_utils:to_queued_task",
+    "../../task_queue",
+    "../../units:time_delta",
+  ]
+}
diff --git a/api/metronome/test/fake_metronome.cc b/api/metronome/test/fake_metronome.cc
new file mode 100644
index 0000000000..83b5ea7604
--- /dev/null
+++ b/api/metronome/test/fake_metronome.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/metronome/test/fake_metronome.h"
+
+#include "api/priority.h"
+#include "api/sequence_checker.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/event.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/task_utils/to_queued_task.h"
+
+namespace webrtc::test {
+
+ForcedTickMetronome::ForcedTickMetronome(TimeDelta tick_period)
+    : tick_period_(tick_period) {}
+
+void ForcedTickMetronome::AddListener(TickListener* listener) {
+  listeners_.insert(listener);
+}
+
+void ForcedTickMetronome::RemoveListener(TickListener* listener) {
+  listeners_.erase(listener);
+}
+
+TimeDelta ForcedTickMetronome::TickPeriod() const {
+  return tick_period_;
+}
+
+size_t ForcedTickMetronome::NumListeners() {
+  return listeners_.size();
+}
+
+void ForcedTickMetronome::Tick() {
+  for (auto* listener : listeners_) {
+    listener->OnTickTaskQueue()->PostTask(
+        ToQueuedTask([listener] { listener->OnTick(); }));
+  }
+}
+
+FakeMetronome::FakeMetronome(TaskQueueFactory* factory, TimeDelta tick_period)
+    : tick_period_(tick_period),
+      queue_(factory->CreateTaskQueue("MetronomeQueue",
+                                      TaskQueueFactory::Priority::HIGH)) {}
+
+FakeMetronome::~FakeMetronome() {
+  RTC_DCHECK(listeners_.empty());
+}
+
+void FakeMetronome::AddListener(TickListener* listener) {
+  MutexLock lock(&mutex_);
+  listeners_.insert(listener);
+  if (!started_) {
+    tick_task_ = RepeatingTaskHandle::Start(queue_.Get(), [this] {
+      MutexLock lock(&mutex_);
+      // Stop if empty.
+      if (listeners_.empty())
+        return TimeDelta::PlusInfinity();
+      for (auto* listener : listeners_) {
+        listener->OnTickTaskQueue()->PostTask(
+            ToQueuedTask([listener] { listener->OnTick(); }));
+      }
+      return tick_period_;
+    });
+    started_ = true;
+  }
+}
+
+void FakeMetronome::RemoveListener(TickListener* listener) {
+  MutexLock lock(&mutex_);
+  listeners_.erase(listener);
+}
+
+void FakeMetronome::Stop() {
+  MutexLock lock(&mutex_);
+  RTC_DCHECK(listeners_.empty());
+  if (started_)
+    queue_.PostTask([this] { tick_task_.Stop(); });
+}
+
+TimeDelta FakeMetronome::TickPeriod() const {
+  return tick_period_;
+}
+
+}  // namespace webrtc::test
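Roughly how a test drives the fake defined above, assuming the existing simulated-time helper `GlobalSimulatedTimeController` (from test/time_controller) provides the task queue factory and advances time; `MyListener` is a hypothetical TickListener implementation, so treat this as a sketch rather than a drop-in test:

    // Sketch: driving FakeMetronome from simulated time in a unit test.
    webrtc::GlobalSimulatedTimeController time_controller(
        webrtc::Timestamp::Seconds(1000));
    webrtc::test::FakeMetronome metronome(
        time_controller.GetTaskQueueFactory(),
        /*tick_period=*/webrtc::TimeDelta::Millis(10));
    MyListener listener;  // Hypothetical TickListener implementation.
    metronome.AddListener(&listener);
    // Advancing simulated time by two periods should yield two OnTick() calls.
    time_controller.AdvanceTime(webrtc::TimeDelta::Millis(20));
    metronome.RemoveListener(&listener);
    metronome.Stop();  // Required before the FakeMetronome is destroyed.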
diff --git a/api/metronome/test/fake_metronome.h b/api/metronome/test/fake_metronome.h
new file mode 100644
index 0000000000..28a79e06ff
--- /dev/null
+++ b/api/metronome/test/fake_metronome.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_METRONOME_TEST_FAKE_METRONOME_H_
+#define API_METRONOME_TEST_FAKE_METRONOME_H_
+
+#include <cstddef>
+#include <set>
+
+#include "api/metronome/metronome.h"
+#include "api/task_queue/task_queue_base.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/units/time_delta.h"
+#include "rtc_base/synchronization/mutex.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/task_utils/repeating_task.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc::test {
+
+// ForcedTickMetronome is a Metronome that ticks when `Tick()` is invoked.
+// The constructor argument `tick_period` is returned in `TickPeriod()`.
+class ForcedTickMetronome : public Metronome {
+ public:
+  explicit ForcedTickMetronome(TimeDelta tick_period);
+
+  // Forces all TickListeners to run `OnTick`.
+  void Tick();
+  size_t NumListeners();
+
+  // Metronome implementation.
+  void AddListener(TickListener* listener) override;
+  void RemoveListener(TickListener* listener) override;
+  TimeDelta TickPeriod() const override;
+
+ private:
+  const TimeDelta tick_period_;
+  std::set<TickListener*> listeners_;
+};
+
+// FakeMetronome is a metronome that ticks based on a repeating task at the
+// `tick_period` provided in the constructor. It is designed for use with
+// simulated task queues for unit tests.
+//
+// `Stop()` must be called before destruction, as it cancels the metronome
+// tick on the proper task queue.
+class FakeMetronome : public Metronome {
+ public:
+  FakeMetronome(TaskQueueFactory* factory, TimeDelta tick_period);
+  ~FakeMetronome() override;
+
+  // Metronome implementation.
+  void AddListener(TickListener* listener) override;
+  void RemoveListener(TickListener* listener) override;
+  TimeDelta TickPeriod() const override;
+
+  void Stop();
+
+ private:
+  const TimeDelta tick_period_;
+  RepeatingTaskHandle tick_task_;
+  bool started_ RTC_GUARDED_BY(mutex_) = false;
+  std::set<TickListener*> listeners_ RTC_GUARDED_BY(mutex_);
+  Mutex mutex_;
+  rtc::TaskQueue queue_;
+};
+
+}  // namespace webrtc::test
+
+#endif  // API_METRONOME_TEST_FAKE_METRONOME_H_
diff --git a/api/neteq/neteq.cc b/api/neteq/neteq.cc
index e8ef4dbd39..155ddf2cf3 100644
--- a/api/neteq/neteq.cc
+++ b/api/neteq/neteq.cc
@@ -30,8 +30,7 @@ std::string NetEq::Config::ToString() const {
      << ", min_delay_ms=" << min_delay_ms
      << ", enable_fast_accelerate=" << (enable_fast_accelerate ? "true" : "false")
      << ", enable_muted_state=" << (enable_muted_state ? "true" : "false")
-     << ", enable_rtx_handling=" << (enable_rtx_handling ? "true" : "false")
-     << ", extra_output_delay_ms=" << extra_output_delay_ms;
+     << ", enable_rtx_handling=" << (enable_rtx_handling ? "true" : "false");
   return ss.str();
 }
diff --git a/api/neteq/neteq.h b/api/neteq/neteq.h
index ea7079e369..675742a1ce 100644
--- a/api/neteq/neteq.h
+++ b/api/neteq/neteq.h
@@ -60,7 +60,7 @@ struct NetEqNetworkStatistics {
 // These metrics are never reset.
 struct NetEqLifetimeStatistics {
   // Stats below correspond to similarly-named fields in the WebRTC stats spec.
-  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats
+  // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats
   uint64_t total_samples_received = 0;
   uint64_t concealed_samples = 0;
   uint64_t concealment_events = 0;
@@ -136,10 +136,6 @@ class NetEq {
     bool enable_rtx_handling = false;
     absl::optional<AudioCodecPairId> codec_pair_id;
     bool for_test_no_time_stretching = false;  // Use only for testing.
-    // Adds extra delay to the output of NetEq, without affecting jitter or
-    // loss behavior. This is mainly for testing. Value must be a non-negative
-    // multiple of 10 ms.
-    int extra_output_delay_ms = 0;
   };
 
   enum ReturnCodes { kOK = 0, kFail = -1 };
@@ -183,14 +179,6 @@ class NetEq {
     SdpAudioFormat sdp_format;
   };
 
-  // Creates a new NetEq object, with parameters set in |config|. The |config|
-  // object will only have to be valid for the duration of the call to this
-  // method.
-  static NetEq* Create(
-      const NetEq::Config& config,
-      Clock* clock,
-      const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory);
-
   virtual ~NetEq() {}
 
   // Inserts a new packet into NetEq.
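With the static `NetEq::Create()` deleted above, construction goes through a factory instead. A sketch of the replacement call site, assuming `DefaultNetEqFactory` (declared in modules/audio_coding/neteq/default_neteq_factory.h) and the builtin decoder factory keep their current signatures:

    // Sketch: what callers of the deleted NetEq::Create() migrate to.
    webrtc::NetEq::Config config;
    config.sample_rate_hz = 48000;
    webrtc::DefaultNetEqFactory factory;
    std::unique_ptr<webrtc::NetEq> neteq = factory.CreateNetEq(
        config, webrtc::CreateBuiltinAudioDecoderFactory(),
        webrtc::Clock::GetRealTimeClock());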
@@ -205,15 +193,15 @@ class NetEq { virtual void InsertEmptyPacket(const RTPHeader& rtp_header) = 0; // Instructs NetEq to deliver 10 ms of audio data. The data is written to - // |audio_frame|. All data in |audio_frame| is wiped; |data_|, |speech_type_|, - // |num_channels_|, |sample_rate_hz_|, |samples_per_channel_|, and - // |vad_activity_| are updated upon success. If an error is returned, some + // `audio_frame`. All data in `audio_frame` is wiped; `data_`, `speech_type_`, + // `num_channels_`, `sample_rate_hz_`, `samples_per_channel_`, and + // `vad_activity_` are updated upon success. If an error is returned, some // fields may not have been updated, or may contain inconsistent values. - // If muted state is enabled (through Config::enable_muted_state), |muted| + // If muted state is enabled (through Config::enable_muted_state), `muted` // may be set to true after a prolonged expand period. When this happens, the - // |data_| in |audio_frame| is not written, but should be interpreted as being + // `data_` in `audio_frame` is not written, but should be interpreted as being // all zeros. For testing purposes, an override can be supplied in the - // |action_override| argument, which will cause NetEq to take this action + // `action_override` argument, which will cause NetEq to take this action // next, instead of the action it would normally choose. An optional output // argument for fetching the current sample rate can be provided, which // will return the same value as last_output_sample_rate_hz() but will avoid @@ -228,12 +216,12 @@ class NetEq { // Replaces the current set of decoders with the given one. virtual void SetCodecs(const std::map& codecs) = 0; - // Associates |rtp_payload_type| with the given codec, which NetEq will + // Associates `rtp_payload_type` with the given codec, which NetEq will // instantiate when it needs it. Returns true iff successful. virtual bool RegisterPayloadType(int rtp_payload_type, const SdpAudioFormat& audio_format) = 0; - // Removes |rtp_payload_type| from the codec database. Returns 0 on success, + // Removes `rtp_payload_type` from the codec database. Returns 0 on success, // -1 on failure. Removing a payload type that is not registered is ok and // will not result in an error. virtual int RemovePayloadType(uint8_t rtp_payload_type) = 0; @@ -250,12 +238,12 @@ class NetEq { // Sets a maximum delay in milliseconds for packet buffer. The latency will // not exceed the given value, even required delay (given the channel // conditions) is higher. Calling this method has the same effect as setting - // the |max_delay_ms| value in the NetEq::Config struct. + // the `max_delay_ms` value in the NetEq::Config struct. virtual bool SetMaximumDelay(int delay_ms) = 0; // Sets a base minimum delay in milliseconds for packet buffer. The minimum - // delay which is set via |SetMinimumDelay| can't be lower than base minimum - // delay. Calling this method is similar to setting the |min_delay_ms| value + // delay which is set via `SetMinimumDelay` can't be lower than base minimum + // delay. Calling this method is similar to setting the `min_delay_ms` value // in the NetEq::Config struct. Returns true if the base minimum is // successfully applied, otherwise false is returned. virtual bool SetBaseMinimumDelayMs(int delay_ms) = 0; @@ -272,7 +260,7 @@ class NetEq { // The packet buffer part of the delay is not updated during DTX/CNG periods. virtual int FilteredCurrentDelayMs() const = 0; - // Writes the current network statistics to |stats|. 
The statistics are reset + // Writes the current network statistics to `stats`. The statistics are reset // after the call. virtual int NetworkStatistics(NetEqNetworkStatistics* stats) = 0; diff --git a/api/neteq/neteq_controller.h b/api/neteq/neteq_controller.h index 4c49a0c24a..2f203f4344 100644 --- a/api/neteq/neteq_controller.h +++ b/api/neteq/neteq_controller.h @@ -115,13 +115,13 @@ class NetEqController { virtual void SoftReset() = 0; // Given info about the latest received packet, and current jitter buffer - // status, returns the operation. |target_timestamp| and |expand_mutefactor| - // are provided for reference. |last_packet_samples| is the number of samples + // status, returns the operation. `target_timestamp` and `expand_mutefactor` + // are provided for reference. `last_packet_samples` is the number of samples // obtained from the last decoded frame. If there is a packet available, it - // should be supplied in |packet|. The mode resulting from the last call to - // NetEqImpl::GetAudio is supplied in |last_mode|. If there is a DTMF event to - // play, |play_dtmf| should be set to true. The output variable - // |reset_decoder| will be set to true if a reset is required; otherwise it is + // should be supplied in `packet`. The mode resulting from the last call to + // NetEqImpl::GetAudio is supplied in `last_mode`. If there is a DTMF event to + // play, `play_dtmf` should be set to true. The output variable + // `reset_decoder` will be set to true if a reset is required; otherwise it is // left unchanged (i.e., it can remain true if it was true before the call). virtual NetEq::Operation GetDecision(const NetEqStatus& status, bool* reset_decoder) = 0; @@ -144,11 +144,11 @@ class NetEqController { virtual bool SetBaseMinimumDelay(int delay_ms) = 0; virtual int GetBaseMinimumDelay() const = 0; - // These methods test the |cng_state_| for different conditions. + // These methods test the `cng_state_` for different conditions. virtual bool CngRfc3389On() const = 0; virtual bool CngOff() const = 0; - // Resets the |cng_state_| to kCngOff. + // Resets the `cng_state_` to kCngOff. virtual void SetCngOff() = 0; // Reports back to DecisionLogic whether the decision to do expand remains or @@ -157,7 +157,7 @@ class NetEqController { // sync buffer. virtual void ExpandDecision(NetEq::Operation operation) = 0; - // Adds |value| to |sample_memory_|. + // Adds `value` to `sample_memory_`. virtual void AddSampleMemory(int32_t value) = 0; // Returns the target buffer level in ms. diff --git a/api/neteq/neteq_controller_factory.h b/api/neteq/neteq_controller_factory.h index 6478fce568..9aba8a21a7 100644 --- a/api/neteq/neteq_controller_factory.h +++ b/api/neteq/neteq_controller_factory.h @@ -23,7 +23,7 @@ class NetEqControllerFactory { public: virtual ~NetEqControllerFactory() = default; - // Creates a new NetEqController object, with parameters set in |config|. + // Creates a new NetEqController object, with parameters set in `config`. virtual std::unique_ptr CreateNetEqController( const NetEqController::Config& config) const = 0; }; diff --git a/api/neteq/neteq_factory.h b/api/neteq/neteq_factory.h index 65cf9eb2e4..526a1282f5 100644 --- a/api/neteq/neteq_factory.h +++ b/api/neteq/neteq_factory.h @@ -24,7 +24,7 @@ class NetEqFactory { public: virtual ~NetEqFactory() = default; - // Creates a new NetEq object, with parameters set in |config|. The |config| + // Creates a new NetEq object, with parameters set in `config`. 
The `config` // object will only have to be valid for the duration of the call to this // method. virtual std::unique_ptr<NetEq> CreateNetEq( diff --git a/api/numerics/samples_stats_counter.h b/api/numerics/samples_stats_counter.h index 283c1e4ed2..16d5d2a891 100644 --- a/api/numerics/samples_stats_counter.h +++ b/api/numerics/samples_stats_counter.h @@ -82,7 +82,7 @@ class SamplesStatsCounter { // additions were done. This function may not be called if there are no // samples. // - // |percentile| has to be in [0; 1]. 0 percentile is the min in the array and + // `percentile` has to be in [0; 1]. 0 percentile is the min in the array and // 1 percentile is the max in the array. double GetPercentile(double percentile); // Returns array view with all samples added into counter. There are no @@ -105,14 +105,14 @@ class SamplesStatsCounter { bool sorted_ = false; }; -// Multiply all sample values on |value| and return new SamplesStatsCounter +// Multiply all sample values by `value` and return a new SamplesStatsCounter // with the resulting samples. Doesn't change the original SamplesStatsCounter. SamplesStatsCounter operator*(const SamplesStatsCounter& counter, double value); inline SamplesStatsCounter operator*(double value, const SamplesStatsCounter& counter) { return counter * value; } -// Divide all sample values on |value| and return new SamplesStatsCounter with +// Divide all sample values by `value` and return a new SamplesStatsCounter with // the resulting samples. Doesn't change the original SamplesStatsCounter. SamplesStatsCounter operator/(const SamplesStatsCounter& counter, double value); diff --git a/api/packet_socket_factory.h b/api/packet_socket_factory.h index 1e9f470357..29d2606b9b 100644 --- a/api/packet_socket_factory.h +++ b/api/packet_socket_factory.h @@ -11,9 +11,12 @@ #ifndef API_PACKET_SOCKET_FACTORY_H_ #define API_PACKET_SOCKET_FACTORY_H_ +#include <memory> #include <string> #include <vector> +#include "api/async_dns_resolver.h" +#include "api/wrapping_async_dns_resolver.h" #include "rtc_base/async_packet_socket.h" #include "rtc_base/proxy_info.h" #include "rtc_base/system/rtc_export.h" @@ -56,7 +59,7 @@ class RTC_EXPORT PacketSocketFactory { virtual AsyncPacketSocket* CreateUdpSocket(const SocketAddress& address, uint16_t min_port, uint16_t max_port) = 0; - virtual AsyncPacketSocket* CreateServerTcpSocket( + virtual AsyncListenSocket* CreateServerTcpSocket( const SocketAddress& local_address, uint16_t min_port, uint16_t max_port, @@ -69,7 +72,23 @@ class RTC_EXPORT PacketSocketFactory { const std::string& user_agent, const PacketSocketTcpOptions& tcp_options) = 0; - virtual AsyncResolverInterface* CreateAsyncResolver() = 0; + // The AsyncResolverInterface is deprecated; users are encouraged + // to switch to the AsyncDnsResolverInterface. + // TODO(bugs.webrtc.org/12598): Remove once all downstream users + // are converted.
+ virtual AsyncResolverInterface* CreateAsyncResolver() { + // Default implementation, so that downstream users can remove this + // immediately after changing to CreateAsyncDnsResolver + RTC_DCHECK_NOTREACHED(); + return nullptr; + } + + virtual std::unique_ptr + CreateAsyncDnsResolver() { + // Default implementation, to aid in transition to AsyncDnsResolverInterface + return std::make_unique( + CreateAsyncResolver()); + } private: PacketSocketFactory(const PacketSocketFactory&) = delete; diff --git a/api/peer_connection_interface.cc b/api/peer_connection_interface.cc index 230731c42d..9f159ea731 100644 --- a/api/peer_connection_interface.cc +++ b/api/peer_connection_interface.cc @@ -41,12 +41,6 @@ PeerConnectionInterface::RTCConfiguration::RTCConfiguration( PeerConnectionInterface::RTCConfiguration::~RTCConfiguration() = default; -RTCError PeerConnectionInterface::RemoveTrackNew( - rtc::scoped_refptr sender) { - return RTCError(RemoveTrack(sender) ? RTCErrorType::NONE - : RTCErrorType::INTERNAL_ERROR); -} - RTCError PeerConnectionInterface::SetConfiguration( const PeerConnectionInterface::RTCConfiguration& config) { return RTCError(); diff --git a/api/peer_connection_interface.h b/api/peer_connection_interface.h index 5499b7d87c..4c4a638ad5 100644 --- a/api/peer_connection_interface.h +++ b/api/peer_connection_interface.h @@ -76,6 +76,7 @@ #include #include "absl/base/attributes.h" +#include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "api/adaptation/resource.h" #include "api/async_dns_resolver.h" @@ -94,6 +95,7 @@ #include "api/jsep.h" #include "api/media_stream_interface.h" #include "api/media_types.h" +#include "api/metronome/metronome.h" #include "api/neteq/neteq_factory.h" #include "api/network_state_predictor.h" #include "api/packet_socket_factory.h" @@ -167,7 +169,12 @@ class StatsObserver : public rtc::RefCountInterface { ~StatsObserver() override = default; }; -enum class SdpSemantics { kPlanB, kUnifiedPlan }; +enum class SdpSemantics { + // TODO(https://crbug.com/webrtc/13528): Remove support for kPlanB. + kPlanB_DEPRECATED, + kPlanB [[deprecated]] = kPlanB_DEPRECATED, + kUnifiedPlan, +}; class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { public: @@ -180,6 +187,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { kHaveRemotePrAnswer, kClosed, }; + static constexpr absl::string_view AsString(SignalingState); // See https://w3c.github.io/webrtc-pc/#dom-rtcicegatheringstate enum IceGatheringState { @@ -187,6 +195,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { kIceGatheringGathering, kIceGatheringComplete }; + static constexpr absl::string_view AsString(IceGatheringState state); // See https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnectionstate enum class PeerConnectionState { @@ -197,6 +206,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { kFailed, kClosed, }; + static constexpr absl::string_view AsString(PeerConnectionState state); // See https://w3c.github.io/webrtc-pc/#dom-rtciceconnectionstate enum IceConnectionState { @@ -209,6 +219,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { kIceConnectionClosed, kIceConnectionMax, }; + static constexpr absl::string_view AsString(IceConnectionState state); // TLS certificate policy. 
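The two defaults above stage the migration: downstream code first switches callers to CreateAsyncDnsResolver(), then deletes its CreateAsyncResolver() override. A hypothetical fully migrated factory, not part of this patch (socket-creation overrides elided, `CreateMyResolver()` illustrative), might look like:

```cpp
#include <memory>

#include "api/async_dns_resolver.h"
#include "api/packet_socket_factory.h"

// Only the new method is overridden; the deprecated CreateAsyncResolver()
// keeps the DCHECKing default above, which should never be reached once
// all callers are converted.
class MyPacketSocketFactory : public rtc::PacketSocketFactory {
 public:
  std::unique_ptr<webrtc::AsyncDnsResolverInterface> CreateAsyncDnsResolver()
      override {
    return CreateMyResolver();  // Hypothetical helper, defined elsewhere.
  }
  // CreateUdpSocket() and the other pure-virtual methods elided for brevity.

 private:
  std::unique_ptr<webrtc::AsyncDnsResolverInterface> CreateMyResolver();
};
```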
enum TlsCertPolicy { @@ -235,9 +246,9 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { std::string username; std::string password; TlsCertPolicy tls_cert_policy = kTlsCertPolicySecure; - // If the URIs in |urls| only contain IP addresses, this field can be used + // If the URIs in `urls` only contain IP addresses, this field can be used // to indicate the hostname, which may be necessary for TLS (using the SNI - // extension). If |urls| itself contains the hostname, this isn't + // extension). If `urls` itself contains the hostname, this isn't // necessary. std::string hostname; // List of protocols to be used in the TLS ALPN extension. @@ -290,6 +301,13 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { enum ContinualGatheringPolicy { GATHER_ONCE, GATHER_CONTINUALLY }; + struct PortAllocatorConfig { + // For min_port and max_port, 0 means not specified. + int min_port = 0; + int max_port = 0; + uint32_t flags = 0; // Same as kDefaultPortAllocatorFlags. + }; + enum class RTCConfigurationType { // A configuration that is safer to use, despite not having the best // performance. Currently this is the default configuration. @@ -367,6 +385,18 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { video_rtcp_report_interval_ms; } + // Settings for the port allocator. Applied only if the port allocator is + // created by PeerConnectionFactory, not if it is injected with + // PeerConnectionDependencies. + int min_port() const { return port_allocator_config.min_port; } + void set_min_port(int port) { port_allocator_config.min_port = port; } + int max_port() const { return port_allocator_config.max_port; } + void set_max_port(int port) { port_allocator_config.max_port = port; } + uint32_t port_allocator_flags() { return port_allocator_config.flags; } + void set_port_allocator_flags(uint32_t flags) { + port_allocator_config.flags = flags; + } + static const int kUndefined = -1; // Default maximum number of packets in the audio jitter buffer. static const int kAudioJitterBufferMaxPackets = 200; @@ -526,7 +556,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // re-determining was removed in ICEbis (ICE v2). bool redetermine_role_on_ice_restart = true; - // This flag is only effective when |continual_gathering_policy| is + // This flag is only effective when `continual_gathering_policy` is // GATHER_CONTINUALLY. // // If true, after the ICE transport type is changed such that new types of @@ -594,28 +624,26 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // cost. absl::optional<rtc::AdapterType> network_preference; - // Configure the SDP semantics used by this PeerConnection. Note that the - // WebRTC 1.0 specification requires kUnifiedPlan semantics. The - // RtpTransceiver API is only available with kUnifiedPlan semantics. + // Configure the SDP semantics used by this PeerConnection. By default, this + // is Unified Plan, which is compliant with the WebRTC 1.0 specification. It + // is possible to override this with the deprecated Plan B SDP format, but + // note that kPlanB will be deleted at some future date, see + // https://crbug.com/webrtc/13528. + // + // kUnifiedPlan will cause the PeerConnection to create offers and answers + // with multiple m= sections where each m= section maps to one RtpSender and + // one RtpReceiver (an RtpTransceiver), either both audio or both video.
+ // This will also cause the PeerConnection to ignore all but the first + // a=ssrc lines that form Plan B streams (if the PeerConnection is given + // Plan B SDP to process). // - // kPlanB will cause PeerConnection to create offers and answers with at + // kPlanB will cause the PeerConnection to create offers and answers with at // most one audio and one video m= section with multiple RtpSenders and // RtpReceivers specified as multiple a=ssrc lines within the section. This // will also cause PeerConnection to ignore all but the first m= section of - // the same media type. - // - // kUnifiedPlan will cause PeerConnection to create offers and answers with - // multiple m= sections where each m= section maps to one RtpSender and one - // RtpReceiver (an RtpTransceiver), either both audio or both video. This - // will also cause PeerConnection to ignore all but the first a=ssrc lines - // that form a Plan B stream. - // - // For users who wish to send multiple audio/video streams and need to stay - // interoperable with legacy WebRTC implementations or use legacy APIs, - // specify kPlanB. - // - // For all other users, specify kUnifiedPlan. - SdpSemantics sdp_semantics = SdpSemantics::kPlanB; + // the same media type (if the PeerConnection is given Unified Plan SDP to + // process). + SdpSemantics sdp_semantics = SdpSemantics::kUnifiedPlan; // TODO(bugs.webrtc.org/9891) - Move to crypto_options or remove. // Actively reset the SRTP parameters whenever the DTLS transports @@ -653,6 +681,20 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // The ping interval (ms) when the connection is stable and writable. This // parameter overrides the default value in the ICE implementation if set. absl::optional<int> stable_writable_connection_ping_interval_ms; + + // Whether this PeerConnection will avoid VPNs (kAvoidVpn), prefer VPNs + // (kPreferVpn), only work over VPN (kOnlyUseVpn) or only work over non-VPN + // (kNeverUseVpn) interfaces. This controls which local interfaces the + // PeerConnection will prefer to connect over. Since VPN detection is not + // perfect, adherence to this preference cannot be guaranteed. + VpnPreference vpn_preference = VpnPreference::kDefault; + + // List of address/length subnets that should be treated like + // VPN (in case webrtc fails to auto detect them). + std::vector<rtc::NetworkMask> vpn_list; + + PortAllocatorConfig port_allocator_config; + // // Don't forget to update operator== if adding something. // @@ -712,8 +754,8 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { }; // Used by GetStats to decide which stats to include in the stats reports. - // |kStatsOutputLevelStandard| includes the standard stats for Javascript API; - // |kStatsOutputLevelDebug| includes both the standard stats and additional + // `kStatsOutputLevelStandard` includes the standard stats for Javascript API; + // `kStatsOutputLevelDebug` includes both the standard stats and additional // stats for debugging purposes. enum StatsOutputLevel { kStatsOutputLevelStandard, kStatsOutputLevelDebug, }; @@ -754,39 +796,41 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // Add a new MediaStreamTrack to be sent on this PeerConnection, and return // the newly created RtpSender. The RtpSender will be associated with the - // streams specified in the |stream_ids| list. + // streams specified in the `stream_ids` list.
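A short usage sketch, not part of this patch, for the configuration fields introduced in the hunks above (the port-range accessors, vpn_preference, and the new kUnifiedPlan default). Port numbers are illustrative; decltype keeps the sketch agnostic about which namespace VpnPreference lives in.

```cpp
#include "api/peer_connection_interface.h"

webrtc::PeerConnectionInterface::RTCConfiguration MakeConfigSketch() {
  webrtc::PeerConnectionInterface::RTCConfiguration config;
  // Unified Plan is now the default; spelled out here only for emphasis.
  config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan;
  // Only honored when PeerConnectionFactory creates the port allocator.
  config.set_min_port(50000);  // Illustrative range.
  config.set_max_port(50999);
  // Prefer non-VPN interfaces; VPN detection is best-effort, as noted above.
  config.vpn_preference = decltype(config.vpn_preference)::kAvoidVpn;
  return config;
}
```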
// // Errors: - // - INVALID_PARAMETER: |track| is null, has a kind other than audio or video, + // - INVALID_PARAMETER: `track` is null, has a kind other than audio or video, // or a sender already exists for the track. // - INVALID_STATE: The PeerConnection is closed. virtual RTCErrorOr> AddTrack( rtc::scoped_refptr track, const std::vector& stream_ids) = 0; - // Remove an RtpSender from this PeerConnection. - // Returns true on success. - // TODO(steveanton): Replace with signature that returns RTCError. - virtual bool RemoveTrack(RtpSenderInterface* sender) = 0; - - // Plan B semantics: Removes the RtpSender from this PeerConnection. - // Unified Plan semantics: Stop sending on the RtpSender and mark the + // Removes the connection between a MediaStreamTrack and the PeerConnection. + // Stops sending on the RtpSender and marks the // corresponding RtpTransceiver direction as no longer sending. + // https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-removetrack // // Errors: - // - INVALID_PARAMETER: |sender| is null or (Plan B only) the sender is not + // - INVALID_PARAMETER: `sender` is null or (Plan B only) the sender is not // associated with this PeerConnection. // - INVALID_STATE: PeerConnection is closed. + // + // Plan B semantics: Removes the RtpSender from this PeerConnection. + // // TODO(bugs.webrtc.org/9534): Rename to RemoveTrack once the other signature - // is removed. - virtual RTCError RemoveTrackNew( - rtc::scoped_refptr sender); + // is removed; remove default implementation once upstream is updated. + virtual RTCError RemoveTrackOrError( + rtc::scoped_refptr sender) { + RTC_CHECK_NOTREACHED(); + return RTCError(); + } // AddTransceiver creates a new RtpTransceiver and adds it to the set of // transceivers. Adding a transceiver will cause future calls to CreateOffer // to add a media description for the corresponding transceiver. // - // The initial value of |mid| in the returned transceiver is null. Setting a + // The initial value of `mid` in the returned transceiver is null. Setting a // new session description may change it to a non-null value. // // https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-addtransceiver @@ -805,7 +849,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // of the transceiver (and sender/receiver) will be derived from the kind of // the track. // Errors: - // - INVALID_PARAMETER: |track| is null. + // - INVALID_PARAMETER: `track` is null. virtual RTCErrorOr> AddTransceiver(rtc::scoped_refptr track) = 0; virtual RTCErrorOr> @@ -815,7 +859,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // Adds a transceiver with the given kind. Can either be MEDIA_TYPE_AUDIO or // MEDIA_TYPE_VIDEO. // Errors: - // - INVALID_PARAMETER: |media_type| is not MEDIA_TYPE_AUDIO or + // - INVALID_PARAMETER: `media_type` is not MEDIA_TYPE_AUDIO or // MEDIA_TYPE_VIDEO. virtual RTCErrorOr> AddTransceiver(cricket::MediaType media_type) = 0; @@ -830,9 +874,9 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // The standard way to do this would be through "addTransceiver", but we // don't support that API yet. // - // |kind| must be "audio" or "video". + // `kind` must be "audio" or "video". // - // |stream_id| is used to populate the msid attribute; if empty, one will + // `stream_id` is used to populate the msid attribute; if empty, one will // be generated automatically. // // This method is not supported with kUnifiedPlan semantics. 
Please use @@ -986,7 +1030,7 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // returned by CreateOffer() or CreateAnswer() or else the operation should // fail. Our implementation, however, allows some amount of "SDP munging", but // please note that this is HIGHLY DISCOURAGED. If you do not intend to munge - // SDP, the method below that doesn't take |desc| as an argument will create + // SDP, the method below that doesn't take `desc` as an argument will create // the offer or answer for you. // // The observer is invoked as soon as the operation completes, which could be @@ -1044,10 +1088,10 @@ virtual PeerConnectionInterface::RTCConfiguration GetConfiguration() = 0; - // Sets the PeerConnection's global configuration to |config|. + // Sets the PeerConnection's global configuration to `config`. // - // The members of |config| that may be changed are |type|, |servers|, - // |ice_candidate_pool_size| and |prune_turn_ports| (though the candidate + // The members of `config` that may be changed are `type`, `servers`, + // `ice_candidate_pool_size` and `prune_turn_ports` (though the candidate // pool size can't be changed after the first call to SetLocalDescription). // Note that this means the BUNDLE and RTCP-multiplexing policies cannot be // changed with this method. @@ -1055,14 +1099,14 @@ // Any changes to STUN/TURN servers or ICE candidate policy will affect the // next gathering phase, and cause the next call to createOffer to generate // new ICE credentials, as described in JSEP. This also occurs when - // |prune_turn_ports| changes, for the same reasoning. + // `prune_turn_ports` changes, for the same reasoning. // - // If an error occurs, returns false and populates |error| if non-null: - // - INVALID_MODIFICATION if |config| contains a modified parameter other + // If an error occurs, returns false and populates `error` if non-null: + // - INVALID_MODIFICATION if `config` contains a modified parameter other // than one of the parameters listed above. - // - INVALID_RANGE if |ice_candidate_pool_size| is out of range. + // - INVALID_RANGE if `ice_candidate_pool_size` is out of range. // - SYNTAX_ERROR if parsing an ICE server URL failed. - // - INVALID_PARAMETER if a TURN server is missing |username| or |password|. + // - INVALID_PARAMETER if a TURN server is missing `username` or `password`. // - INTERNAL_ERROR if an unexpected error occurred. // // TODO(nisse): Make this pure virtual once all Chrome subclasses of @@ -1071,9 +1115,9 @@ const PeerConnectionInterface::RTCConfiguration& config); // Provides a remote candidate to the ICE Agent. - // A copy of the |candidate| will be created and added to the remote + // A copy of the `candidate` will be created and added to the remote // description. So the caller of this method still has the ownership of the - // |candidate|. + // `candidate`. // TODO(hbos): The spec mandates chaining this operation onto the operations // chain; deprecate and remove this version in favor of the callback-based // signature. @@ -1096,13 +1140,13 @@ // this PeerConnection. Other limitations might affect these limits and // are respected (for example "b=AS" in SDP).
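The SetConfiguration() contract documented above can be exercised roughly as in this sketch, which is not part of the patch; `UpdateIceServers` and its arguments are illustrative. Only the members listed above (type, servers, ice_candidate_pool_size, prune_turn_ports) may be modified this way.

```cpp
#include <string>

#include "api/peer_connection_interface.h"

webrtc::RTCError UpdateIceServers(webrtc::PeerConnectionInterface* pc,
                                  const std::string& turn_uri,
                                  const std::string& username,
                                  const std::string& password) {
  webrtc::PeerConnectionInterface::RTCConfiguration config =
      pc->GetConfiguration();
  webrtc::PeerConnectionInterface::IceServer server;
  server.urls.push_back(turn_uri);
  server.username = username;
  server.password = password;
  config.servers = {server};
  // Takes effect at the next ICE gathering phase, per the comment above.
  return pc->SetConfiguration(config);
}
```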
// - // Setting |current_bitrate_bps| will reset the current bitrate estimate + // Setting `current_bitrate_bps` will reset the current bitrate estimate // to the provided value. virtual RTCError SetBitrate(const BitrateSettings& bitrate) = 0; // Enable/disable playout of received audio streams. Enabled by default. Note // that even if playout is enabled, streams will only be played out if the - // appropriate SDP is also applied. Setting |playout| to false will stop + // appropriate SDP is also applied. Setting `playout` to false will stop // playout of the underlying audio device but starts a task which will poll // for audio data every 10ms to ensure that audio processing happens and the // audio statistics are updated. @@ -1157,13 +1201,13 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { virtual void AddAdaptationResource(rtc::scoped_refptr resource) {} // Start RtcEventLog using an existing output-sink. Takes ownership of - // |output| and passes it on to Call, which will take the ownership. If the + // `output` and passes it on to Call, which will take the ownership. If the // operation fails the output will be closed and deallocated. The event log - // will send serialized events to the output object every |output_period_ms|. + // will send serialized events to the output object every `output_period_ms`. // Applications using the event log should generally make their own trade-off // regarding the output period. A long period is generally more efficient, // with potential drawbacks being more bursty thread usage, and more events - // lost in case the application crashes. If the |output_period_ms| argument is + // lost in case the application crashes. If the `output_period_ms` argument is // omitted, webrtc selects a default deemed to be workable in most cases. virtual bool StartRtcEventLog(std::unique_ptr output, int64_t output_period_ms) = 0; @@ -1222,7 +1266,7 @@ class PeerConnectionObserver { // Used to fire spec-compliant onnegotiationneeded events, which should only // fire when the Operations Chain is empty. The observer is responsible for // queuing a task (e.g. Chromium: jump to main thread) to maybe fire the - // event. The event identified using |event_id| must only fire if + // event. The event identified using `event_id` must only fire if // PeerConnection::ShouldFireNegotiationNeededEvent() returns true since it is // possible for the event to become invalidated by operations subsequently // chained. @@ -1254,14 +1298,6 @@ class PeerConnectionObserver { // A new ICE candidate has been gathered. virtual void OnIceCandidate(const IceCandidateInterface* candidate) = 0; - // Gathering of an ICE candidate failed. - // See https://w3c.github.io/webrtc-pc/#event-icecandidateerror - // |host_candidate| is a stringified socket address. - virtual void OnIceCandidateError(const std::string& host_candidate, - const std::string& url, - int error_code, - const std::string& error_text) {} - // Gathering of an ICE candidate failed. // See https://w3c.github.io/webrtc-pc/#event-icecandidateerror virtual void OnIceCandidateError(const std::string& address, @@ -1295,8 +1331,8 @@ class PeerConnectionObserver { // This is called when signaling indicates a transceiver will be receiving // media from the remote endpoint. This is fired during a call to // SetRemoteDescription. The receiving track can be accessed by: - // |transceiver->receiver()->track()| and its associated streams by - // |transceiver->receiver()->streams()|. 
+ // `transceiver->receiver()->track()` and its associated streams by + // `transceiver->receiver()->streams()`. // Note: This will only be called if Unified Plan semantics are specified. // This behavior is specified in section 2.2.8.2.5 of the "Set the // RTCSessionDescription" algorithm: @@ -1384,6 +1420,7 @@ struct RTC_EXPORT PeerConnectionFactoryDependencies final { rtc::Thread* network_thread = nullptr; rtc::Thread* worker_thread = nullptr; rtc::Thread* signaling_thread = nullptr; + rtc::SocketFactory* socket_factory = nullptr; std::unique_ptr task_queue_factory; std::unique_ptr media_engine; std::unique_ptr call_factory; @@ -1393,7 +1430,7 @@ struct RTC_EXPORT PeerConnectionFactoryDependencies final { network_state_predictor_factory; std::unique_ptr network_controller_factory; // This will only be used if CreatePeerConnection is called without a - // |port_allocator|, causing the default allocator and network manager to be + // `port_allocator`, causing the default allocator and network manager to be // used. std::unique_ptr network_monitor_factory; std::unique_ptr neteq_factory; @@ -1401,6 +1438,7 @@ struct RTC_EXPORT PeerConnectionFactoryDependencies final { std::unique_ptr trials; std::unique_ptr transport_controller_send_factory; + std::unique_ptr metronome; }; // PeerConnectionFactoryInterface is the factory interface used for creating @@ -1467,12 +1505,12 @@ class RTC_EXPORT PeerConnectionFactoryInterface const PeerConnectionInterface::RTCConfiguration& configuration, PeerConnectionDependencies dependencies); - // Deprecated; |allocator| and |cert_generator| may be null, in which case + // Deprecated; `allocator` and `cert_generator` may be null, in which case // default implementations will be used. // - // |observer| must not be null. + // `observer` must not be null. // - // Note that this method does not take ownership of |observer|; it's the + // Note that this method does not take ownership of `observer`; it's the // responsibility of the caller to delete it. It can be safely deleted after // Close has been called on the returned PeerConnection, which ensures no // more observer callbacks will be invoked. @@ -1483,13 +1521,13 @@ class RTC_EXPORT PeerConnectionFactoryInterface std::unique_ptr cert_generator, PeerConnectionObserver* observer); - // Returns the capabilities of an RTP sender of type |kind|. + // Returns the capabilities of an RTP sender of type `kind`. // If for some reason you pass in MEDIA_TYPE_DATA, returns an empty structure. // TODO(orphis): Make pure virtual when all subclasses implement it. virtual RtpCapabilities GetRtpSenderCapabilities( cricket::MediaType kind) const; - // Returns the capabilities of an RTP receiver of type |kind|. + // Returns the capabilities of an RTP receiver of type `kind`. // If for some reason you pass in MEDIA_TYPE_DATA, returns an empty structure. // TODO(orphis): Make pure virtual when all subclasses implement it. virtual RtpCapabilities GetRtpReceiverCapabilities( @@ -1499,22 +1537,22 @@ class RTC_EXPORT PeerConnectionFactoryInterface const std::string& stream_id) = 0; // Creates an AudioSourceInterface. - // |options| decides audio processing settings. + // `options` decides audio processing settings. virtual rtc::scoped_refptr CreateAudioSource( const cricket::AudioOptions& options) = 0; - // Creates a new local VideoTrack. The same |source| can be used in several + // Creates a new local VideoTrack. The same `source` can be used in several // tracks. 
virtual rtc::scoped_refptr<VideoTrackInterface> CreateVideoTrack( const std::string& label, VideoTrackSourceInterface* source) = 0; - // Creates an new AudioTrack. At the moment |source| can be null. + // Creates a new AudioTrack. At the moment `source` can be null. virtual rtc::scoped_refptr<AudioTrackInterface> CreateAudioTrack( const std::string& label, AudioSourceInterface* source) = 0; - // Starts AEC dump using existing file. Takes ownership of |file| and passes + // Starts AEC dump using existing file. Takes ownership of `file` and passes // it on to VoiceEngine (via other objects) immediately, which will take // the ownership. If the operation fails, the file will be closed. // A maximum file size in bytes can be specified. When the file size limit is @@ -1549,14 +1587,101 @@ class RTC_EXPORT PeerConnectionFactoryInterface // video-specific interfaces, and omit the corresponding modules from its // build. // -// If |network_thread| or |worker_thread| are null, the PeerConnectionFactory -// will create the necessary thread internally. If |signaling_thread| is null, +// If `network_thread` or `worker_thread` are null, the PeerConnectionFactory +// will create the necessary thread internally. If `signaling_thread` is null, // the PeerConnectionFactory will use the thread on which this method is called // as the signaling thread, wrapping it in an rtc::Thread object if needed. RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface> CreateModularPeerConnectionFactory( PeerConnectionFactoryDependencies dependencies); +// https://w3c.github.io/webrtc-pc/#dom-rtcsignalingstate +inline constexpr absl::string_view PeerConnectionInterface::AsString( + SignalingState state) { + switch (state) { + case SignalingState::kStable: + return "stable"; + case SignalingState::kHaveLocalOffer: + return "have-local-offer"; + case SignalingState::kHaveLocalPrAnswer: + return "have-local-pranswer"; + case SignalingState::kHaveRemoteOffer: + return "have-remote-offer"; + case SignalingState::kHaveRemotePrAnswer: + return "have-remote-pranswer"; + case SignalingState::kClosed: + return "closed"; + } + // This cannot happen. + // Not using "RTC_CHECK_NOTREACHED()" because AsString() is constexpr. + return ""; +} + +// https://w3c.github.io/webrtc-pc/#dom-rtcicegatheringstate +inline constexpr absl::string_view PeerConnectionInterface::AsString( + IceGatheringState state) { + switch (state) { + case IceGatheringState::kIceGatheringNew: + return "new"; + case IceGatheringState::kIceGatheringGathering: + return "gathering"; + case IceGatheringState::kIceGatheringComplete: + return "complete"; + } + // This cannot happen. + // Not using "RTC_CHECK_NOTREACHED()" because AsString() is constexpr. + return ""; +} + +// https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnectionstate +inline constexpr absl::string_view PeerConnectionInterface::AsString( + PeerConnectionState state) { + switch (state) { + case PeerConnectionState::kNew: + return "new"; + case PeerConnectionState::kConnecting: + return "connecting"; + case PeerConnectionState::kConnected: + return "connected"; + case PeerConnectionState::kDisconnected: + return "disconnected"; + case PeerConnectionState::kFailed: + return "failed"; + case PeerConnectionState::kClosed: + return "closed"; + } + // This cannot happen. + // Not using "RTC_CHECK_NOTREACHED()" because AsString() is constexpr.
+ return ""; +} + +inline constexpr absl::string_view PeerConnectionInterface::AsString( + IceConnectionState state) { + switch (state) { + case kIceConnectionNew: + return "new"; + case kIceConnectionChecking: + return "checking"; + case kIceConnectionConnected: + return "connected"; + case kIceConnectionCompleted: + return "completed"; + case kIceConnectionFailed: + return "failed"; + case kIceConnectionDisconnected: + return "disconnected"; + case kIceConnectionClosed: + return "closed"; + case kIceConnectionMax: + // This cannot happen. + // Not using "RTC_CHECK_NOTREACHED()" because AsString() is constexpr. + return ""; + } + // This cannot happen. + // Not using "RTC_CHECK_NOTREACHED()" because AsString() is constexpr. + return ""; +} + } // namespace webrtc #endif // API_PEER_CONNECTION_INTERFACE_H_ diff --git a/api/ref_counted_base.h b/api/ref_counted_base.h index 931cb20762..f20228b740 100644 --- a/api/ref_counted_base.h +++ b/api/ref_counted_base.h @@ -12,7 +12,6 @@ #include -#include "rtc_base/constructor_magic.h" #include "rtc_base/ref_counter.h" namespace rtc { @@ -21,6 +20,9 @@ class RefCountedBase { public: RefCountedBase() = default; + RefCountedBase(const RefCountedBase&) = delete; + RefCountedBase& operator=(const RefCountedBase&) = delete; + void AddRef() const { ref_count_.IncRef(); } RefCountReleaseStatus Release() const { const auto status = ref_count_.DecRef(); @@ -39,8 +41,6 @@ class RefCountedBase { private: mutable webrtc::webrtc_impl::RefCounter ref_count_{0}; - - RTC_DISALLOW_COPY_AND_ASSIGN(RefCountedBase); }; // Template based version of `RefCountedBase` for simple implementations that do @@ -61,6 +61,9 @@ class RefCountedNonVirtual { public: RefCountedNonVirtual() = default; + RefCountedNonVirtual(const RefCountedNonVirtual&) = delete; + RefCountedNonVirtual& operator=(const RefCountedNonVirtual&) = delete; + void AddRef() const { ref_count_.IncRef(); } RefCountReleaseStatus Release() const { // If you run into this assert, T has virtual methods. There are two @@ -88,8 +91,6 @@ class RefCountedNonVirtual { private: mutable webrtc::webrtc_impl::RefCounter ref_count_{0}; - - RTC_DISALLOW_COPY_AND_ASSIGN(RefCountedNonVirtual); }; } // namespace rtc diff --git a/api/rtc_error.h b/api/rtc_error.h index 7cfd89ab75..42ceed18d9 100644 --- a/api/rtc_error.h +++ b/api/rtc_error.h @@ -176,13 +176,13 @@ inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) #endif // WEBRTC_UNIT_TEST // Helper macro that can be used by implementations to create an error with a -// message and log it. |message| should be a string literal or movable +// message and log it. `message` should be a string literal or movable // std::string. -#define LOG_AND_RETURN_ERROR_EX(type, message, severity) \ - { \ - RTC_DCHECK(type != RTCErrorType::NONE); \ - RTC_LOG(severity) << message << " (" << ToString(type) << ")"; \ - return webrtc::RTCError(type, message); \ +#define LOG_AND_RETURN_ERROR_EX(type, message, severity) \ + { \ + RTC_DCHECK(type != RTCErrorType::NONE); \ + RTC_LOG(severity) << message << " (" << ::webrtc::ToString(type) << ")"; \ + return ::webrtc::RTCError(type, message); \ } #define LOG_AND_RETURN_ERROR(type, message) \ @@ -244,7 +244,7 @@ class RTCErrorOr { // // REQUIRES: !error.ok(). This requirement is DCHECKed. RTCErrorOr(RTCError&& error) : error_(std::move(error)) { // NOLINT - RTC_DCHECK(!error.ok()); + RTC_DCHECK(!error_.ok()); } // Constructs a new RTCErrorOr with the given value. 
After calling this diff --git a/api/rtc_event_log/rtc_event.h b/api/rtc_event_log/rtc_event.h index 51db8f0b4d..8697a25a74 100644 --- a/api/rtc_event_log/rtc_event.h +++ b/api/rtc_event_log/rtc_event.h @@ -27,7 +27,7 @@ class RtcEvent { // of Type. This leaks the information of existing subclasses into the // superclass, but the *actual* information - rtclog::StreamConfig, etc. - // is kept separate. - enum class Type { + enum class Type : uint32_t { AlrStateEvent, RouteChangeEvent, RemoteEstimateEvent, @@ -53,7 +53,9 @@ class RtcEvent { GenericPacketSent, GenericPacketReceived, GenericAckReceived, - FrameDecoded + FrameDecoded, + BeginV3Log = 0x2501580, + EndV3Log = 0x2501581 }; RtcEvent(); @@ -63,6 +65,13 @@ class RtcEvent { virtual bool IsConfigEvent() const = 0; + // Events are grouped by Type before being encoded. + // Optionally, `GetGroupKey` can be overloaded to group the + // events by a secondary key (in addition to the event type.) + // This can, in some cases, improve compression efficiency + // e.g. by grouping events by SSRC. + virtual uint32_t GetGroupKey() const { return 0; } + int64_t timestamp_ms() const { return timestamp_us_ / 1000; } int64_t timestamp_us() const { return timestamp_us_; } diff --git a/api/rtc_event_log/rtc_event_log.h b/api/rtc_event_log/rtc_event_log.h index cebaf84e40..7b42cdc028 100644 --- a/api/rtc_event_log/rtc_event_log.h +++ b/api/rtc_event_log/rtc_event_log.h @@ -29,7 +29,7 @@ class RtcEventLog { // TODO(eladalon): Get rid of the legacy encoding and this enum once all // clients have migrated to the new format. - enum class EncodingType { Legacy, NewFormat }; + enum class EncodingType { Legacy, NewFormat, ProtoFree }; virtual ~RtcEventLog() = default; @@ -42,7 +42,7 @@ class RtcEventLog { // which it would be permissible to read and/or modify it. virtual void StopLogging() = 0; - // Stops logging to file and calls |callback| when the file has been closed. + // Stops logging to file and calls `callback` when the file has been closed. // Note that it is not safe to call any other members, including the // destructor, until the callback has been called. // TODO(srte): Remove default implementation when it's safe to do so. diff --git a/api/rtc_event_log_output.h b/api/rtc_event_log_output.h index 92fb9e855b..cd16b27501 100644 --- a/api/rtc_event_log_output.h +++ b/api/rtc_event_log_output.h @@ -29,7 +29,7 @@ class RtcEventLogOutput { // Write encoded events to an output. Returns true if the output was // successfully written in its entirety. Otherwise, no guarantee is given // about how much data was written, if any. The output sink becomes inactive - // after the first time |false| is returned. Write() may not be called on + // after the first time `false` is returned. Write() may not be called on // an inactive output sink. virtual bool Write(const std::string& output) = 0; diff --git a/api/rtp_packet_info.h b/api/rtp_packet_info.h index 605620d638..bc9839f479 100644 --- a/api/rtp_packet_info.h +++ b/api/rtp_packet_info.h @@ -23,9 +23,9 @@ namespace webrtc { // -// Structure to hold information about a received |RtpPacket|. It is primarily +// Structure to hold information about a received `RtpPacket`. It is primarily // used to carry per-packet information from when a packet is received until -// the information is passed to |SourceTracker|. +// the information is passed to `SourceTracker`. 
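The GetGroupKey() hook added to RtcEvent above invites a small illustration. A hypothetical subclass, not part of this patch, that groups its events by SSRC (the use case the comment suggests) might look like this; `SketchRtpEvent` and its `ssrc_` member are illustrative, and GenericPacketReceived is reused from the enum above purely as a placeholder type.

```cpp
#include <cstdint>

#include "api/rtc_event_log/rtc_event.h"

class SketchRtpEvent final : public webrtc::RtcEvent {
 public:
  explicit SketchRtpEvent(uint32_t ssrc) : ssrc_(ssrc) {}
  Type GetType() const override { return Type::GenericPacketReceived; }
  bool IsConfigEvent() const override { return false; }
  // Secondary grouping key in addition to Type: events with the same SSRC
  // are encoded together, which can improve compression efficiency.
  uint32_t GetGroupKey() const override { return ssrc_; }

 private:
  const uint32_t ssrc_;
};
```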
// class RTC_EXPORT RtpPacketInfo { public: @@ -102,8 +102,8 @@ class RTC_EXPORT RtpPacketInfo { // Fields from the Absolute Capture Time header extension: // http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time - // To not be confused with |local_capture_clock_offset_|, the - // |estimated_capture_clock_offset| in |absolute_capture_time_| should + // To not be confused with `local_capture_clock_offset_`, the + // `estimated_capture_clock_offset` in `absolute_capture_time_` should // represent the clock offset between a remote sender and the capturer, and // thus equals to the corresponding values in the received RTP packets, // subjected to possible interpolations. @@ -113,7 +113,7 @@ class RTC_EXPORT RtpPacketInfo { // capture clock offset defined in the Absolute Capture Time header extension. absl::optional local_capture_clock_offset_; - // Local |webrtc::Clock|-based timestamp of when the packet was received. + // Local `webrtc::Clock`-based timestamp of when the packet was received. Timestamp receive_time_; }; diff --git a/api/rtp_packet_infos.h b/api/rtp_packet_infos.h index d63646414d..031e33332e 100644 --- a/api/rtp_packet_infos.h +++ b/api/rtp_packet_infos.h @@ -18,6 +18,7 @@ #include "api/ref_counted_base.h" #include "api/rtp_packet_info.h" #include "api/scoped_refptr.h" +#include "rtc_base/ref_counted_object.h" #include "rtc_base/system/rtc_export.h" namespace webrtc { @@ -26,8 +27,8 @@ namespace webrtc { // an audio or video frame. Uses internal reference counting to make it very // cheap to copy. // -// We should ideally just use |std::vector| and have it -// |std::move()|-ed as the per-packet information is transferred from one object +// We should ideally just use `std::vector` and have it +// `std::move()`-ed as the per-packet information is transferred from one object // to another. But moving the info, instead of copying it, is not easily done // for the current video code. class RTC_EXPORT RtpPacketInfos { @@ -79,7 +80,7 @@ class RTC_EXPORT RtpPacketInfos { size_type size() const { return entries().size(); } private: - class Data : public rtc::RefCountedBase { + class Data final : public rtc::RefCountedNonVirtual { public: static rtc::scoped_refptr Create(const vector_type& entries) { // Performance optimization for the empty case. 
@@ -87,7 +88,7 @@ class RTC_EXPORT RtpPacketInfos { return nullptr; } - return new Data(entries); + return rtc::make_ref_counted<Data>(entries); } static rtc::scoped_refptr<Data> Create(vector_type&& entries) { @@ -96,16 +97,16 @@ return nullptr; } - return new Data(std::move(entries)); + return rtc::make_ref_counted<Data>(std::move(entries)); } const vector_type& entries() const { return entries_; } - private: explicit Data(const vector_type& entries) : entries_(entries) {} explicit Data(vector_type&& entries) : entries_(std::move(entries)) {} - ~Data() override {} + ~Data() = default; + private: const vector_type entries_; }; diff --git a/api/rtp_parameters.cc b/api/rtp_parameters.cc index 5ce6780753..c48b8da02c 100644 --- a/api/rtp_parameters.cc +++ b/api/rtp_parameters.cc @@ -11,6 +11,7 @@ #include <algorithm> #include <string> +#include <tuple> #include <utility> #include "api/array_view.h" @@ -131,6 +132,7 @@ constexpr char RtpExtension::kMidUri[]; constexpr char RtpExtension::kRidUri[]; constexpr char RtpExtension::kRepairedRidUri[]; constexpr char RtpExtension::kVideoFrameTrackingIdUri[]; +constexpr char RtpExtension::kCsrcAudioLevelsUri[]; constexpr int RtpExtension::kMinId; constexpr int RtpExtension::kMaxId; @@ -237,12 +239,6 @@ ... -const RtpExtension* RtpExtension::FindHeaderExtensionByUri( - const std::vector<RtpExtension>& extensions, - absl::string_view uri) { - return FindHeaderExtensionByUri(extensions, uri, kPreferEncryptedExtension); -} - const RtpExtension* RtpExtension::FindHeaderExtensionByUriAndEncryption( const std::vector<RtpExtension>& extensions, absl::string_view uri, bool encrypt) { @@ -285,6 +281,14 @@ } } + // Sort the returned vector to make comparisons of header extensions reliable. + // In order of priority, we sort by uri first, then encrypt and id last. + std::sort(filtered.begin(), filtered.end(), + [](const RtpExtension& a, const RtpExtension& b) { + return std::tie(a.uri, a.encrypt, a.id) < + std::tie(b.uri, b.encrypt, b.id); + }); + return filtered; } } // namespace webrtc diff --git a/api/rtp_parameters.h b/api/rtp_parameters.h index a098bad6b0..45cedfdd9c 100644 --- a/api/rtp_parameters.h +++ b/api/rtp_parameters.h @@ -126,7 +126,7 @@ struct RTC_EXPORT RtpCodecCapability { RtpCodecCapability(); ~RtpCodecCapability(); - // Build MIME "type/subtype" string from |name| and |kind|. + // Build MIME "type/subtype" string from `name` and `kind`. std::string mime_type() const { return MediaTypeToString(kind) + "/" + name; } // Used to identify the codec. Equivalent to MIME subtype. @@ -277,11 +277,6 @@ struct RTC_EXPORT RtpExtension { const std::vector<RtpExtension>& extensions, absl::string_view uri, Filter filter); - ABSL_DEPRECATED( - "Use RtpExtension::FindHeaderExtensionByUri with filter argument") - static const RtpExtension* FindHeaderExtensionByUri( - const std::vector<RtpExtension>& extensions, - absl::string_view uri); // Returns the header extension with the given URI and encrypt parameter, // if found, otherwise nullptr. @@ -291,6 +286,9 @@ bool encrypt); // Returns a list of extensions where any extension URI is unique. + // The returned list will be sorted by uri first, then encrypt and id last. + // Having the list sorted allows the caller to compare filtered lists for + // equality to detect when changes have been made.
static const std::vector<RtpExtension> DeduplicateHeaderExtensions( const std::vector<RtpExtension>& extensions, Filter filter); @@ -489,8 +487,6 @@ struct RTC_EXPORT RtpEncodingParameters { // Specifies the number of temporal layers for video (if the feature is // supported by the codec implementation). - // TODO(asapersson): Different number of temporal layers are not supported - // per simulcast layer. // Screencast support is experimental. absl::optional<int> num_temporal_layers; @@ -537,7 +533,7 @@ struct RTC_EXPORT RtpCodecParameters { RtpCodecParameters(const RtpCodecParameters&); ~RtpCodecParameters(); - // Build MIME "type/subtype" string from |name| and |kind|. + // Build MIME "type/subtype" string from `name` and `kind`. std::string mime_type() const { return MediaTypeToString(kind) + "/" + name; } // Used to identify the codec. Equivalent to MIME subtype. @@ -562,7 +558,7 @@ absl::optional<int> num_channels; // The maximum packetization time to be used by an RtpSender. - // If |ptime| is also set, this will be ignored. + // If `ptime` is also set, this will be ignored. // TODO(deadbeef): Not implemented. absl::optional<int> max_ptime; @@ -607,7 +603,7 @@ struct RTC_EXPORT RtpCapabilities { // Supported Forward Error Correction (FEC) mechanisms. Note that the RED, // ulpfec and flexfec codecs used by these mechanisms will still appear in - // |codecs|. + // `codecs`. std::vector<FecMechanism> fec; bool operator==(const RtpCapabilities& o) const { diff --git a/api/rtp_parameters_unittest.cc b/api/rtp_parameters_unittest.cc index 51ad426748..234c3c9b6d 100644 --- a/api/rtp_parameters_unittest.cc +++ b/api/rtp_parameters_unittest.cc @@ -109,6 +109,38 @@ TEST(RtpExtensionTest, DeduplicateHeaderExtensions) { EXPECT_EQ((std::vector<RtpExtension>{kExtension1Encrypted}), filtered); } +// Test that the filtered vector is sorted so that for a given unsorted array of +// extensions, the filtered vector will always be laid out the same (for easy +// comparison).
+TEST(RtpExtensionTest, DeduplicateHeaderExtensionsSorted) { + const std::vector<RtpExtension> extensions = { + RtpExtension("cde1", 11, false), RtpExtension("cde2", 12, true), + RtpExtension("abc1", 3, false), RtpExtension("abc2", 4, true), + RtpExtension("cde3", 9, true), RtpExtension("cde4", 10, false), + RtpExtension("abc3", 1, true), RtpExtension("abc4", 2, false), + RtpExtension("bcd3", 7, false), RtpExtension("bcd1", 8, true), + RtpExtension("bcd2", 5, true), RtpExtension("bcd4", 6, false), + }; + + auto encrypted = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kRequireEncryptedExtension); + + const std::vector<RtpExtension> expected_sorted_encrypted = { + RtpExtension("abc2", 4, true), RtpExtension("abc3", 1, true), + RtpExtension("bcd1", 8, true), RtpExtension("bcd2", 5, true), + RtpExtension("cde2", 12, true), RtpExtension("cde3", 9, true)}; + EXPECT_EQ(expected_sorted_encrypted, encrypted); + + auto unencrypted = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kDiscardEncryptedExtension); + + const std::vector<RtpExtension> expected_sorted_unencrypted = { + RtpExtension("abc1", 3, false), RtpExtension("abc4", 2, false), + RtpExtension("bcd3", 7, false), RtpExtension("bcd4", 6, false), + RtpExtension("cde1", 11, false), RtpExtension("cde4", 10, false)}; + EXPECT_EQ(expected_sorted_unencrypted, unencrypted); +} + TEST(RtpExtensionTest, FindHeaderExtensionByUriAndEncryption) { std::vector<RtpExtension> extensions; diff --git a/api/rtp_receiver_interface.h b/api/rtp_receiver_interface.h index 327c9f2fee..e4ec9b5986 100644 --- a/api/rtp_receiver_interface.h +++ b/api/rtp_receiver_interface.h @@ -54,7 +54,7 @@ class RTC_EXPORT RtpReceiverInterface : public rtc::RefCountInterface { // TODO(https://bugs.webrtc.org/907849) remove default implementation virtual rtc::scoped_refptr<DtlsTransportInterface> dtls_transport() const; - // The list of streams that |track| is associated with. This is the same as + // The list of streams that `track` is associated with. This is the same as // the [[AssociatedRemoteMediaStreams]] internal slot in the spec. // https://w3c.github.io/webrtc-pc/#dfn-associatedremotemediastreams // TODO(hbos): Make pure virtual as soon as Chromium's mock implements this. @@ -84,8 +84,8 @@ virtual void SetObserver(RtpReceiverObserverInterface* observer) = 0; // Sets the jitter buffer minimum delay until media playout. Actual observed - // delay may differ depending on the congestion control. |delay_seconds| is a - // positive value including 0.0 measured in seconds. |nullopt| means default + // delay may differ depending on the congestion control. `delay_seconds` is a + // positive value including 0.0 measured in seconds. `nullopt` means default // value must be used.
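The `delay_seconds` semantics described above translate into a one-line call; a hedged sketch, not part of this patch (`SketchJitterDelay` is illustrative):

```cpp
#include "absl/types/optional.h"
#include "api/rtp_receiver_interface.h"

// Pin a receiver's jitter buffer to at least 200 ms of delay, then revert
// to the congestion-control-driven default.
void SketchJitterDelay(webrtc::RtpReceiverInterface* receiver) {
  receiver->SetJitterBufferMinimumDelay(0.2);  // `delay_seconds`, i.e. 200 ms.
  receiver->SetJitterBufferMinimumDelay(absl::nullopt);  // Back to default.
}
```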
virtual void SetJitterBufferMinimumDelay( absl::optional delay_seconds) = 0; diff --git a/api/rtp_transceiver_interface.cc b/api/rtp_transceiver_interface.cc index fd5085c336..7267b286be 100644 --- a/api/rtp_transceiver_interface.cc +++ b/api/rtp_transceiver_interface.cc @@ -34,39 +34,14 @@ void RtpTransceiverInterface::Stop() { } RTCError RtpTransceiverInterface::StopStandard() { - RTC_NOTREACHED() << "DEBUG: RtpTransceiverInterface::StopStandard called"; + RTC_DCHECK_NOTREACHED() + << "DEBUG: RtpTransceiverInterface::StopStandard called"; return RTCError::OK(); } void RtpTransceiverInterface::StopInternal() { - RTC_NOTREACHED() << "DEBUG: RtpTransceiverInterface::StopInternal called"; -} - -RTCError RtpTransceiverInterface::SetCodecPreferences( - rtc::ArrayView) { - RTC_NOTREACHED() << "Not implemented"; - return {}; -} - -std::vector RtpTransceiverInterface::codec_preferences() - const { - return {}; -} - -std::vector -RtpTransceiverInterface::HeaderExtensionsToOffer() const { - return {}; -} - -webrtc::RTCError RtpTransceiverInterface::SetOfferedRtpHeaderExtensions( - rtc::ArrayView - header_extensions_to_offer) { - return webrtc::RTCError(webrtc::RTCErrorType::UNSUPPORTED_OPERATION); -} - -std::vector -RtpTransceiverInterface::HeaderExtensionsNegotiated() const { - return {}; + RTC_DCHECK_NOTREACHED() + << "DEBUG: RtpTransceiverInterface::StopInternal called"; } // TODO(bugs.webrtc.org/11839) Remove default implementations when clients @@ -78,7 +53,7 @@ void RtpTransceiverInterface::SetDirection( RTCError RtpTransceiverInterface::SetDirectionWithError( RtpTransceiverDirection new_direction) { - RTC_NOTREACHED() << "Default implementation called"; + RTC_DCHECK_NOTREACHED() << "Default implementation called"; return RTCError::OK(); } diff --git a/api/rtp_transceiver_interface.h b/api/rtp_transceiver_interface.h index 4799c4b153..c9d911fac1 100644 --- a/api/rtp_transceiver_interface.h +++ b/api/rtp_transceiver_interface.h @@ -97,8 +97,7 @@ class RTC_EXPORT RtpTransceiverInterface : public rtc::RefCountInterface { // transceiver's stop() method has been called, but the negotiation with // the other end for shutting down the transceiver is not yet done. // https://w3c.github.io/webrtc-pc/#dfn-stopping-0 - // TODO(hta): Remove default implementation. - virtual bool stopping() const; + virtual bool stopping() const = 0; // The direction attribute indicates the preferred direction of this // transceiver, which will be used in calls to CreateOffer and CreateAnswer. @@ -147,28 +146,28 @@ class RTC_EXPORT RtpTransceiverInterface : public rtc::RefCountInterface { // by WebRTC for this transceiver. // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-setcodecpreferences virtual RTCError SetCodecPreferences( - rtc::ArrayView codecs); - virtual std::vector codec_preferences() const; + rtc::ArrayView codecs) = 0; + virtual std::vector codec_preferences() const = 0; // Readonly attribute which contains the set of header extensions that was set // with SetOfferedRtpHeaderExtensions, or a default set if it has not been // called. // https://w3c.github.io/webrtc-extensions/#rtcrtptransceiver-interface virtual std::vector HeaderExtensionsToOffer() - const; + const = 0; // Readonly attribute which is either empty if negotation has not yet // happened, or a vector of the negotiated header extensions. 
// https://w3c.github.io/webrtc-extensions/#rtcrtptransceiver-interface virtual std::vector<RtpHeaderExtensionCapability> HeaderExtensionsNegotiated() - const; + const = 0; // The SetOfferedRtpHeaderExtensions method modifies the next SDP negotiation // so that it negotiates use of header extensions which are not kStopped. // https://w3c.github.io/webrtc-extensions/#rtcrtptransceiver-interface virtual webrtc::RTCError SetOfferedRtpHeaderExtensions( rtc::ArrayView<const RtpHeaderExtensionCapability> - header_extensions_to_offer); + header_extensions_to_offer) = 0; protected: ~RtpTransceiverInterface() override = default; diff --git a/api/scoped_refptr.h b/api/scoped_refptr.h index 4e3f0ebfc8..8f45f89206 100644 --- a/api/scoped_refptr.h +++ b/api/scoped_refptr.h @@ -24,13 +24,13 @@ // void some_function() { // scoped_refptr<MyFoo> foo = new MyFoo(); // foo->Method(param); -// // |foo| is released when this function returns +// // `foo` is released when this function returns // } // // void some_other_function() { // scoped_refptr<MyFoo> foo = new MyFoo(); // ... -// foo = nullptr; // explicitly releases |foo| +// foo = nullptr; // explicitly releases `foo` // ... // if (foo) // foo->Method(param); @@ -45,10 +45,10 @@ // scoped_refptr<MyFoo> b; // // b.swap(a); -// // now, |b| references the MyFoo object, and |a| references null. +// // now, `b` references the MyFoo object, and `a` references null. // } // -// To make both |a| and |b| in the above example reference the same MyFoo +// To make both `a` and `b` in the above example reference the same MyFoo // object, simply use the assignment operator: // // { @@ -56,7 +56,7 @@ // scoped_refptr<MyFoo> b; // // b = a; -// // now, |a| and |b| each own a reference to the same MyFoo object. +// // now, `a` and `b` each own a reference to the same MyFoo object. // } // @@ -74,8 +74,9 @@ class scoped_refptr { typedef T element_type; scoped_refptr() : ptr_(nullptr) {} + scoped_refptr(std::nullptr_t) : ptr_(nullptr) {} // NOLINT(runtime/explicit) - scoped_refptr(T* p) : ptr_(p) { // NOLINT(runtime/explicit) + explicit scoped_refptr(T* p) : ptr_(p) { if (ptr_) ptr_->AddRef(); } diff --git a/api/scoped_refptr_unittest.cc b/api/scoped_refptr_unittest.cc index 75a202bccd..22b61209cd 100644 --- a/api/scoped_refptr_unittest.cc +++ b/api/scoped_refptr_unittest.cc @@ -48,7 +48,7 @@ class ScopedRefCounted { TEST(ScopedRefptrTest, IsCopyConstructable) { FunctionsCalled called; - scoped_refptr<ScopedRefCounted> ptr = new ScopedRefCounted(&called); + scoped_refptr<ScopedRefCounted> ptr(new ScopedRefCounted(&called)); scoped_refptr<ScopedRefCounted> another_ptr = ptr; EXPECT_TRUE(ptr); @@ -59,7 +59,7 @@ TEST(ScopedRefptrTest, IsCopyAssignable) { FunctionsCalled called; scoped_refptr<ScopedRefCounted> another_ptr; - scoped_refptr<ScopedRefCounted> ptr = new ScopedRefCounted(&called); + scoped_refptr<ScopedRefCounted> ptr(new ScopedRefCounted(&called)); another_ptr = ptr; EXPECT_TRUE(ptr); @@ -69,7 +69,7 @@ TEST(ScopedRefptrTest, IsMoveConstructableWithoutExtraAddRefRelease) { FunctionsCalled called; - scoped_refptr<ScopedRefCounted> ptr = new ScopedRefCounted(&called); + scoped_refptr<ScopedRefCounted> ptr(new ScopedRefCounted(&called)); scoped_refptr<ScopedRefCounted> another_ptr = std::move(ptr); EXPECT_FALSE(ptr); @@ -81,7 +81,7 @@ TEST(ScopedRefptrTest, IsMoveAssignableWithoutExtraAddRefRelease) { FunctionsCalled called; scoped_refptr<ScopedRefCounted> another_ptr; - scoped_refptr<ScopedRefCounted> ptr = new ScopedRefCounted(&called); + scoped_refptr<ScopedRefCounted> ptr(new ScopedRefCounted(&called)); another_ptr = std::move(ptr); EXPECT_FALSE(ptr);
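The constructor change above is what forces the unittest edits around it: with `explicit`, copy-initialization from a raw pointer no longer compiles. A sketch of the two idiomatic replacements (illustrative; `MyFoo` stands in for any ref-counted type, mirroring the header comment):

#include "api/scoped_refptr.h"
#include "rtc_base/ref_count.h"
#include "rtc_base/ref_counted_object.h"

class MyFoo : public rtc::RefCountInterface {
 public:
  void Method() {}
};

void Demo() {
  // rtc::scoped_refptr<MyFoo> a = new rtc::RefCountedObject<MyFoo>();  // now ill-formed
  rtc::scoped_refptr<MyFoo> a(new rtc::RefCountedObject<MyFoo>());  // direct init: OK
  auto b = rtc::make_ref_counted<MyFoo>();  // factory used elsewhere in this patch
  a->Method();
  b->Method();
}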
@@ -100,8 +100,8 @@ TEST(ScopedRefptrTest, MovableDuringVectorReallocation) { std::vector<scoped_refptr<ScopedRefCounted>> ptrs; ptrs.reserve(1); // Insert more elements than reserved to provoke reallocation. - ptrs.push_back(new ScopedRefCounted(&called)); - ptrs.push_back(new ScopedRefCounted(&called)); + ptrs.emplace_back(new ScopedRefCounted(&called)); + ptrs.emplace_back(new ScopedRefCounted(&called)); EXPECT_EQ(called.addref, 2); EXPECT_EQ(called.release, 0); diff --git a/api/set_local_description_observer_interface.h b/api/set_local_description_observer_interface.h index 90d000cd81..8e7b6258d3 100644 --- a/api/set_local_description_observer_interface.h +++ b/api/set_local_description_observer_interface.h @@ -21,7 +21,7 @@ namespace webrtc { // the observer to examine the effects of the operation without delay. class SetLocalDescriptionObserverInterface : public rtc::RefCountInterface { public: - // On success, |error.ok()| is true. + // On success, `error.ok()` is true. virtual void OnSetLocalDescriptionComplete(RTCError error) = 0; }; diff --git a/api/set_remote_description_observer_interface.h b/api/set_remote_description_observer_interface.h index 178255564a..d1c075309f 100644 --- a/api/set_remote_description_observer_interface.h +++ b/api/set_remote_description_observer_interface.h @@ -22,7 +22,7 @@ namespace webrtc { // operation. class SetRemoteDescriptionObserverInterface : public rtc::RefCountInterface { public: - // On success, |error.ok()| is true. + // On success, `error.ok()` is true. virtual void OnSetRemoteDescriptionComplete(RTCError error) = 0; }; diff --git a/api/stats/rtc_stats.h b/api/stats/rtc_stats.h index 9290e803fa..a5fae52c29 100644 --- a/api/stats/rtc_stats.h +++ b/api/stats/rtc_stats.h @@ -35,7 +35,7 @@ class RTCStatsMemberInterface; // static const char kType[]; // It is used as a unique class identifier and a string representation of the // class type, see https://w3c.github.io/webrtc-stats/#rtcstatstype-str*. -// Use the |WEBRTC_RTCSTATS_IMPL| macro when implementing subclasses, see macro +// Use the `WEBRTC_RTCSTATS_IMPL` macro when implementing subclasses, see macro // for details. // // Derived classes list their dictionary members, RTCStatsMember<T>, as public @@ -47,7 +47,7 @@ // foo.baz->push_back("hello world"); // uint32_t x = *foo.bar; // -// Pointers to all the members are available with |Members|, allowing iteration: +// Pointers to all the members are available with `Members`, allowing iteration: // // for (const RTCStatsMemberInterface* member : foo.Members()) { // printf("%s = %s\n", member->name(), member->ValueToString().c_str()); @@ -65,11 +65,11 @@ class RTC_EXPORT RTCStats { const std::string& id() const { return id_; } // Time relative to the UNIX epoch (Jan 1, 1970, UTC), in microseconds. int64_t timestamp_us() const { return timestamp_us_; } - // Returns the static member variable |kType| of the implementing class. + // Returns the static member variable `kType` of the implementing class. virtual const char* type() const = 0; - // Returns a vector of pointers to all the |RTCStatsMemberInterface| members + // Returns a vector of pointers to all the `RTCStatsMemberInterface` members // of this class. This allows for iteration of members. For a given class, - // |Members| always returns the same members in the same order. + // `Members` always returns the same members in the same order. std::vector<const RTCStatsMemberInterface*> Members() const; // Checks if the two stats objects are of the same type and have the same // member values. Timestamps are not compared.
These operators are exposed for @@ -81,8 +81,8 @@ class RTC_EXPORT RTCStats { // object, listing all of its members (names and values). std::string ToJson() const; - // Downcasts the stats object to an |RTCStats| subclass |T|. DCHECKs that the - // object is of type |T|. + // Downcasts the stats object to an `RTCStats` subclass `T`. DCHECKs that the + // object is of type `T`. template const T& cast_to() const { RTC_DCHECK_EQ(type(), T::kType); @@ -90,8 +90,8 @@ class RTC_EXPORT RTCStats { } protected: - // Gets a vector of all members of this |RTCStats| object, including members - // derived from parent classes. |additional_capacity| is how many more members + // Gets a vector of all members of this `RTCStats` object, including members + // derived from parent classes. `additional_capacity` is how many more members // shall be reserved in the vector (so that subclasses can allocate a vector // with room for both parent and child members without it having to resize). virtual std::vector @@ -101,21 +101,21 @@ class RTC_EXPORT RTCStats { int64_t timestamp_us_; }; -// All |RTCStats| classes should use these macros. -// |WEBRTC_RTCSTATS_DECL| is placed in a public section of the class definition. -// |WEBRTC_RTCSTATS_IMPL| is placed outside the class definition (in a .cc). +// All `RTCStats` classes should use these macros. +// `WEBRTC_RTCSTATS_DECL` is placed in a public section of the class definition. +// `WEBRTC_RTCSTATS_IMPL` is placed outside the class definition (in a .cc). // -// These macros declare (in _DECL) and define (in _IMPL) the static |kType| and -// overrides methods as required by subclasses of |RTCStats|: |copy|, |type| and -// |MembersOfThisObjectAndAncestors|. The |...| argument is a list of addresses +// These macros declare (in _DECL) and define (in _IMPL) the static `kType` and +// overrides methods as required by subclasses of `RTCStats`: `copy`, `type` and +// `MembersOfThisObjectAndAncestors`. The |...| argument is a list of addresses // to each member defined in the implementing class. The list must have at least // one member. // // (Since class names need to be known to implement these methods this cannot be -// part of the base |RTCStats|. While these methods could be implemented using +// part of the base `RTCStats`. While these methods could be implemented using // templates, that would only work for immediate subclasses. Subclasses of // subclasses also have to override these methods, resulting in boilerplate -// code. Using a macro avoids this and works for any |RTCStats| class, including +// code. Using a macro avoids this and works for any `RTCStats` class, including // grandchildren.) // // Sample usage: @@ -215,10 +215,10 @@ enum class NonStandardGroupId { kRtcStatsRelativePacketArrivalDelay, }; -// Interface for |RTCStats| members, which have a name and a value of a type -// defined in a subclass. Only the types listed in |Type| are supported, these -// are implemented by |RTCStatsMember|. The value of a member may be -// undefined, the value can only be read if |is_defined|. +// Interface for `RTCStats` members, which have a name and a value of a type +// defined in a subclass. Only the types listed in `Type` are supported, these +// are implemented by `RTCStatsMember`. The value of a member may be +// undefined, the value can only be read if `is_defined`. class RTCStatsMemberInterface { public: // Member value types. @@ -284,9 +284,9 @@ class RTCStatsMemberInterface { bool is_defined_; }; -// Template implementation of |RTCStatsMemberInterface|. 
+// Template implementation of `RTCStatsMemberInterface`. // The supported types are the ones described by -// |RTCStatsMemberInterface::Type|. +// `RTCStatsMemberInterface::Type`. template class RTCStatsMember : public RTCStatsMemberInterface { public: diff --git a/api/stats/rtc_stats_report.h b/api/stats/rtc_stats_report.h index 0fe5ce91f9..2ced422370 100644 --- a/api/stats/rtc_stats_report.h +++ b/api/stats/rtc_stats_report.h @@ -30,7 +30,7 @@ namespace webrtc { // A collection of stats. -// This is accessible as a map from |RTCStats::id| to |RTCStats|. +// This is accessible as a map from `RTCStats::id` to `RTCStats`. class RTC_EXPORT RTCStatsReport final : public rtc::RefCountedNonVirtual { public: @@ -71,8 +71,8 @@ class RTC_EXPORT RTCStatsReport final const RTCStats* Get(const std::string& id) const; size_t size() const { return stats_.size(); } - // Gets the stat object of type |T| by ID, where |T| is any class descending - // from |RTCStats|. + // Gets the stat object of type `T` by ID, where `T` is any class descending + // from `RTCStats`. // Returns null if there is no stats object for the given ID or it is the // wrong type. template @@ -85,17 +85,17 @@ class RTC_EXPORT RTCStatsReport final } // Removes the stats object from the report, returning ownership of it or null - // if there is no object with |id|. + // if there is no object with `id`. std::unique_ptr Take(const std::string& id); - // Takes ownership of all the stats in |other|, leaving it empty. + // Takes ownership of all the stats in `other`, leaving it empty. void TakeMembersFrom(rtc::scoped_refptr other); - // Stats iterators. Stats are ordered lexicographically on |RTCStats::id|. + // Stats iterators. Stats are ordered lexicographically on `RTCStats::id`. ConstIterator begin() const; ConstIterator end() const; - // Gets the subset of stats that are of type |T|, where |T| is any class - // descending from |RTCStats|. + // Gets the subset of stats that are of type `T`, where `T` is any class + // descending from `RTCStats`. template std::vector GetStatsOfType() const { std::vector stats_of_type; diff --git a/api/stats/rtcstats_objects.h b/api/stats/rtcstats_objects.h index 2030380918..f38d962e15 100644 --- a/api/stats/rtcstats_objects.h +++ b/api/stats/rtcstats_objects.h @@ -57,7 +57,7 @@ struct RTCDtlsTransportState { static const char* const kFailed; }; -// |RTCMediaStreamTrackStats::kind| is not an enum in the spec but the only +// `RTCMediaStreamTrackStats::kind` is not an enum in the spec but the only // valid values are "audio" and "video". // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-kind struct RTCMediaStreamTrackKind { @@ -170,6 +170,8 @@ class RTC_EXPORT RTCIceCandidatePairStats final : public RTCStats { RTCStatsMember writable; // TODO(hbos): Collect and populate this value. https://bugs.webrtc.org/7062 RTCStatsMember readable; + RTCStatsMember packets_sent; + RTCStatsMember packets_received; RTCStatsMember bytes_sent; RTCStatsMember bytes_received; RTCStatsMember total_round_trip_time; @@ -194,10 +196,12 @@ class RTC_EXPORT RTCIceCandidatePairStats final : public RTCStats { RTCStatsMember consent_responses_received; // TODO(hbos): Collect and populate this value. 
https://bugs.webrtc.org/7062 RTCStatsMember consent_responses_sent; + RTCStatsMember packets_discarded_on_send; + RTCStatsMember bytes_discarded_on_send; }; // https://w3c.github.io/webrtc-stats/#icecandidate-dict* -// TODO(hbos): |RTCStatsCollector| only collects candidates that are part of +// TODO(hbos): `RTCStatsCollector` only collects candidates that are part of // ice candidate pairs, but there could be candidates not paired with anything. // crbug.com/632723 // TODO(qingsi): Add the stats of STUN binding requests (keepalives) and collect @@ -221,7 +225,6 @@ class RTC_EXPORT RTCIceCandidateStats : public RTCStats { // TODO(hbos): Support enum types? "RTCStatsMember"? RTCStatsMember candidate_type; RTCStatsMember priority; - // TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/632723 RTCStatsMember url; protected: @@ -232,8 +235,8 @@ class RTC_EXPORT RTCIceCandidateStats : public RTCStats { }; // In the spec both local and remote varieties are of type RTCIceCandidateStats. -// But here we define them as subclasses of |RTCIceCandidateStats| because the -// |kType| need to be different ("RTCStatsType type") in the local/remote case. +// But here we define them as subclasses of `RTCIceCandidateStats` because the +// `kType` need to be different ("RTCStatsType type") in the local/remote case. // https://w3c.github.io/webrtc-stats/#rtcstatstype-str* // This forces us to have to override copy() and type(). class RTC_EXPORT RTCLocalIceCandidateStats final : public RTCIceCandidateStats { @@ -289,28 +292,28 @@ class RTC_EXPORT RTCMediaStreamTrackStats final : public RTCStats { RTCStatsMember media_source_id; RTCStatsMember remote_source; RTCStatsMember ended; - // TODO(hbos): |RTCStatsCollector| does not return stats for detached tracks. + // TODO(hbos): `RTCStatsCollector` does not return stats for detached tracks. // crbug.com/659137 RTCStatsMember detached; - // See |RTCMediaStreamTrackKind| for valid values. + // See `RTCMediaStreamTrackKind` for valid values. RTCStatsMember kind; RTCStatsMember jitter_buffer_delay; RTCStatsMember jitter_buffer_emitted_count; // Video-only members RTCStatsMember frame_width; RTCStatsMember frame_height; - // TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/659137 + // TODO(hbos): Not collected by `RTCStatsCollector`. crbug.com/659137 RTCStatsMember frames_per_second; RTCStatsMember frames_sent; RTCStatsMember huge_frames_sent; RTCStatsMember frames_received; RTCStatsMember frames_decoded; RTCStatsMember frames_dropped; - // TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/659137 + // TODO(hbos): Not collected by `RTCStatsCollector`. crbug.com/659137 RTCStatsMember frames_corrupted; - // TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/659137 + // TODO(hbos): Not collected by `RTCStatsCollector`. crbug.com/659137 RTCStatsMember partial_frames_lost; - // TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/659137 + // TODO(hbos): Not collected by `RTCStatsCollector`. 
crbug.com/659137 RTCStatsMember full_frames_lost; // Audio-only members RTCStatsMember audio_level; // Receive-only @@ -399,13 +402,14 @@ class RTC_EXPORT RTCReceivedRtpStreamStats : public RTCRTPStreamStats { // TODO(hbos) The following fields need to be added and migrated // both from RTCInboundRtpStreamStats and RTCRemoteInboundRtpStreamStats: - // packetsReceived, packetsDiscarded, packetsRepaired, burstPacketsLost, + // packetsReceived, packetsRepaired, burstPacketsLost, // burstPacketDiscarded, burstLossCount, burstDiscardCount, burstLossRate, // burstDiscardRate, gapLossRate, gapDiscardRate, framesDropped, // partialFramesLost, fullFramesLost // crbug.com/webrtc/12532 RTCStatsMember jitter; RTCStatsMember packets_lost; // Signed per RFC 3550 + RTCStatsMember packets_discarded; protected: RTCReceivedRtpStreamStats(const std::string&& id, int64_t timestamp_us); @@ -463,8 +467,6 @@ class RTC_EXPORT RTCInboundRTPStreamStats final // TODO(hbos): Collect and populate this value. https://bugs.webrtc.org/7065 RTCStatsMember round_trip_time; // TODO(hbos): Collect and populate this value. https://bugs.webrtc.org/7065 - RTCStatsMember packets_discarded; - // TODO(hbos): Collect and populate this value. https://bugs.webrtc.org/7065 RTCStatsMember packets_repaired; // TODO(hbos): Collect and populate this value. https://bugs.webrtc.org/7065 RTCStatsMember burst_packets_lost; @@ -526,7 +528,7 @@ class RTC_EXPORT RTCOutboundRTPStreamStats final : public RTCRTPStreamStats { RTCStatsMember bytes_sent; RTCStatsMember header_bytes_sent; RTCStatsMember retransmitted_bytes_sent; - // TODO(hbos): Collect and populate this value. https://bugs.webrtc.org/7066 + // TODO(https://crbug.com/webrtc/13394): Also collect this metric for video. RTCStatsMember target_bitrate; RTCStatsMember frames_encoded; RTCStatsMember key_frames_encoded; @@ -569,7 +571,7 @@ class RTC_EXPORT RTCRemoteInboundRtpStreamStats final ~RTCRemoteInboundRtpStreamStats() override; // TODO(hbos): The following RTCReceivedRtpStreamStats metrics should also be - // implemented: packetsReceived, packetsDiscarded, packetsRepaired, + // implemented: packetsReceived, packetsRepaired, // burstPacketsLost, burstPacketsDiscarded, burstLossCount, burstDiscardCount, // burstLossRate, burstDiscardRate, gapLossRate and gapDiscardRate. // RTCRemoteInboundRtpStreamStats @@ -594,6 +596,9 @@ class RTC_EXPORT RTCRemoteOutboundRtpStreamStats final RTCStatsMember local_id; RTCStatsMember remote_timestamp; RTCStatsMember reports_sent; + RTCStatsMember round_trip_time; + RTCStatsMember round_trip_time_measurements; + RTCStatsMember total_round_trip_time; }; // https://w3c.github.io/webrtc-stats/#dom-rtcmediasourcestats @@ -642,7 +647,7 @@ class RTC_EXPORT RTCVideoSourceStats final : public RTCMediaSourceStats { RTCStatsMember width; RTCStatsMember height; RTCStatsMember frames; - RTCStatsMember frames_per_second; + RTCStatsMember frames_per_second; }; // https://w3c.github.io/webrtc-stats/#transportstats-dict* diff --git a/api/stats_types.cc b/api/stats_types.cc index 6fdc7e85a5..b044e4ab11 100644 --- a/api/stats_types.cc +++ b/api/stats_types.cc @@ -23,8 +23,6 @@ // generate strongly typed inline C++ code that forces the correct type to be // used for a given name at compile time. 
-using rtc::RefCountedObject; - namespace webrtc { namespace { @@ -60,7 +58,7 @@ const char* InternalTypeToString(StatsReport::StatsType type) { case StatsReport::kStatsReportTypeDataChannel: return "datachannel"; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return nullptr; } @@ -293,7 +291,7 @@ bool StatsReport::Value::Equals(const Value& other) const { case kId: return (*value_.id_)->Equals(*other.value_.id_); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return false; } @@ -650,12 +648,15 @@ const char* StatsReport::Value::display_name() const { return "googTrackId"; case kStatsValueNameTimingFrameInfo: return "googTimingFrameInfo"; + // TODO(bugs.webrtc.org/11226): Remove. case kStatsValueNameTypingNoiseState: return "googTypingNoiseState"; case kStatsValueNameWritable: return "googWritable"; case kStatsValueNameAudioDeviceUnderrunCounter: return "googAudioDeviceUnderrunCounter"; + case kStatsValueNameLocalCandidateRelayProtocol: + return "googLocalCandidateRelayProtocol"; } return nullptr; @@ -678,7 +679,7 @@ std::string StatsReport::Value::ToString() const { case kId: return (*value_.id_)->ToString(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return std::string(); } @@ -690,17 +691,17 @@ StatsReport::~StatsReport() = default; // static StatsReport::Id StatsReport::NewBandwidthEstimationId() { - return Id(new RefCountedObject()); + return rtc::make_ref_counted(); } // static StatsReport::Id StatsReport::NewTypedId(StatsType type, const std::string& id) { - return Id(new RefCountedObject(type, id)); + return rtc::make_ref_counted(type, id); } // static StatsReport::Id StatsReport::NewTypedIntId(StatsType type, int id) { - return Id(new RefCountedObject(type, id)); + return rtc::make_ref_counted(type, id); } // static @@ -708,26 +709,25 @@ StatsReport::Id StatsReport::NewIdWithDirection( StatsType type, const std::string& id, StatsReport::Direction direction) { - return Id(new RefCountedObject(type, id, direction)); + return rtc::make_ref_counted(type, id, direction); } // static StatsReport::Id StatsReport::NewCandidateId(bool local, const std::string& id) { - return Id(new RefCountedObject(local, id)); + return rtc::make_ref_counted(local, id); } // static StatsReport::Id StatsReport::NewComponentId(const std::string& content_name, int component) { - return Id(new RefCountedObject(content_name, component)); + return rtc::make_ref_counted(content_name, component); } // static StatsReport::Id StatsReport::NewCandidatePairId(const std::string& content_name, int component, int index) { - return Id( - new RefCountedObject(content_name, component, index)); + return rtc::make_ref_counted(content_name, component, index); } const char* StatsReport::TypeToString() const { @@ -835,7 +835,7 @@ StatsReport* StatsCollection::ReplaceOrAddNew(const StatsReport::Id& id) { return InsertNew(id); } -// Looks for a report with the given |id|. If one is not found, null +// Looks for a report with the given `id`. If one is not found, null // will be returned. StatsReport* StatsCollection::Find(const StatsReport::Id& id) { RTC_DCHECK(thread_checker_.IsCurrent()); diff --git a/api/stats_types.h b/api/stats_types.h index d032462da6..e7dd528e62 100644 --- a/api/stats_types.h +++ b/api/stats_types.h @@ -22,7 +22,6 @@ #include "api/scoped_refptr.h" #include "api/sequence_checker.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/ref_count.h" #include "rtc_base/system/rtc_export.h" @@ -39,58 +38,58 @@ class RTC_EXPORT StatsReport { enum StatsType { // StatsReport types. 
- // A StatsReport of |type| = "googSession" contains overall information + // A StatsReport of `type` = "googSession" contains overall information // about the thing libjingle calls a session (which may contain one // or more RTP sessions). kStatsReportTypeSession, - // A StatsReport of |type| = "googTransport" contains information + // A StatsReport of `type` = "googTransport" contains information // about a libjingle "transport". kStatsReportTypeTransport, - // A StatsReport of |type| = "googComponent" contains information + // A StatsReport of `type` = "googComponent" contains information // about a libjingle "channel" (typically, RTP or RTCP for a transport). // This is intended to be the same thing as an ICE "Component". kStatsReportTypeComponent, - // A StatsReport of |type| = "googCandidatePair" contains information + // A StatsReport of `type` = "googCandidatePair" contains information // about a libjingle "connection" - a single source/destination port pair. // This is intended to be the same thing as an ICE "candidate pair". kStatsReportTypeCandidatePair, - // A StatsReport of |type| = "VideoBWE" is statistics for video Bandwidth - // Estimation, which is global per-session. The |id| field is "bweforvideo" + // A StatsReport of `type` = "VideoBWE" is statistics for video Bandwidth + // Estimation, which is global per-session. The `id` field is "bweforvideo" // (will probably change in the future). kStatsReportTypeBwe, - // A StatsReport of |type| = "ssrc" is statistics for a specific rtp stream. - // The |id| field is the SSRC in decimal form of the rtp stream. + // A StatsReport of `type` = "ssrc" is statistics for a specific rtp stream. + // The `id` field is the SSRC in decimal form of the rtp stream. kStatsReportTypeSsrc, - // A StatsReport of |type| = "remoteSsrc" is statistics for a specific + // A StatsReport of `type` = "remoteSsrc" is statistics for a specific // rtp stream, generated by the remote end of the connection. kStatsReportTypeRemoteSsrc, - // A StatsReport of |type| = "googTrack" is statistics for a specific media - // track. The |id| field is the track id. + // A StatsReport of `type` = "googTrack" is statistics for a specific media + // track. The `id` field is the track id. kStatsReportTypeTrack, - // A StatsReport of |type| = "localcandidate" or "remotecandidate" is + // A StatsReport of `type` = "localcandidate" or "remotecandidate" is // attributes on a specific ICE Candidate. It links to its connection pair // by candidate id. The string value is taken from // http://w3c.github.io/webrtc-stats/#rtcstatstype-enum*. kStatsReportTypeIceLocalCandidate, kStatsReportTypeIceRemoteCandidate, - // A StatsReport of |type| = "googCertificate" contains an SSL certificate - // transmitted by one of the endpoints of this connection. The |id| is + // A StatsReport of `type` = "googCertificate" contains an SSL certificate + // transmitted by one of the endpoints of this connection. The `id` is // controlled by the fingerprint, and is used to identify the certificate in // the Channel stats (as "googLocalCertificateId" or // "googRemoteCertificateId") and in any child certificates (as // "googIssuerId"). kStatsReportTypeCertificate, - // A StatsReport of |type| = "datachannel" with statistics for a + // A StatsReport of `type` = "datachannel" with statistics for a // particular DataChannel.
kStatsReportTypeDataChannel, }; @@ -232,13 +231,15 @@ class RTC_EXPORT StatsReport { kStatsValueNameSrtpCipher, kStatsValueNameTargetDelayMs, kStatsValueNameTargetEncBitrate, - kStatsValueNameTimingFrameInfo, // Result of |TimingFrameInfo::ToString| + kStatsValueNameTimingFrameInfo, // Result of `TimingFrameInfo::ToString` kStatsValueNameTrackId, kStatsValueNameTransmitBitrate, kStatsValueNameTransportType, + // TODO(bugs.webrtc.org/11226): Remove. kStatsValueNameTypingNoiseState, kStatsValueNameWritable, kStatsValueNameAudioDeviceUnderrunCounter, + kStatsValueNameLocalCandidateRelayProtocol, }; class RTC_EXPORT IdBase : public rtc::RefCountInterface { @@ -287,6 +288,9 @@ class RTC_EXPORT StatsReport { ~Value(); + Value(const Value&) = delete; + Value& operator=(const Value&) = delete; + // Support ref counting. Note that for performance reasons, we // don't use thread safe operations. Therefore, all operations // affecting the ref count (in practice, creation and copying of @@ -331,7 +335,7 @@ class RTC_EXPORT StatsReport { bool bool_val() const; const Id& id_val() const; - // Returns the string representation of |name|. + // Returns the string representation of `name`. const char* display_name() const; // Converts the native value to a string representation of the value. @@ -339,7 +343,7 @@ Type type() const { return type_; } - // TODO(tommi): Move |name| and |display_name| out of the Value struct. + // TODO(tommi): Move `name` and `display_name` out of the Value struct. const StatsValueName name; private: @@ -357,17 +361,18 @@ const char* static_string_; Id* id_; } value_; - - RTC_DISALLOW_COPY_AND_ASSIGN(Value); }; typedef rtc::scoped_refptr<Value> ValuePtr; typedef std::map<StatsValueName, ValuePtr> Values; - // Ownership of |id| is passed to |this|. + // Ownership of `id` is passed to `this`. explicit StatsReport(const Id& id); ~StatsReport(); + StatsReport(const StatsReport&) = delete; + StatsReport& operator=(const StatsReport&) = delete; + // Factory functions for various types of stats IDs. static Id NewBandwidthEstimationId(); static Id NewTypedId(StatsType type, const std::string& id); @@ -407,8 +412,6 @@ class RTC_EXPORT StatsReport { const Id id_; double timestamp_; // Time since 1970-01-01T00:00:00Z in milliseconds. Values values_; - - RTC_DISALLOW_COPY_AND_ASSIGN(StatsReport); }; // Typedef for an array of const StatsReport pointers. @@ -434,13 +437,13 @@ class StatsCollection { const_iterator end() const; size_t size() const; - // Creates a new report object with |id| that does not already + // Creates a new report object with `id` that does not already // exist in the list of reports. StatsReport* InsertNew(const StatsReport::Id& id); StatsReport* FindOrAddNew(const StatsReport::Id& id); StatsReport* ReplaceOrAddNew(const StatsReport::Id& id); - // Looks for a report with the given |id|. If one is not found, null + // Looks for a report with the given `id`. If one is not found, null // will be returned. StatsReport* Find(const StatsReport::Id& id); diff --git a/api/task_queue/queued_task.h b/api/task_queue/queued_task.h index 5748628d7b..27a5eda5a5 100644 --- a/api/task_queue/queued_task.h +++ b/api/task_queue/queued_task.h @@ -20,9 +20,9 @@ class QueuedTask { virtual ~QueuedTask() = default; // Main routine that will run when the task is executed on the desired queue.
- // The task should return |true| to indicate that it should be deleted or - // |false| to indicate that the queue should consider ownership of the task - // having been transferred. Returning |false| can be useful if a task has + // The task should return `true` to indicate that it should be deleted or + // `false` to indicate that the queue should consider ownership of the task + // having been transferred. Returning `false` can be useful if a task has // re-posted itself to a different queue or is otherwise being re-used. virtual bool Run() = 0; };
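To make the ownership contract concrete, here is a sketch of a task that hands itself over to another queue and returns false (illustrative; `RelayTask` and both queues are invented for the example):

#include <memory>

#include "api/task_queue/queued_task.h"
#include "api/task_queue/task_queue_base.h"

// Runs once on the current queue, then re-posts itself to `next` exactly once.
class RelayTask : public webrtc::QueuedTask {
 public:
  explicit RelayTask(webrtc::TaskQueueBase* next) : next_(next) {}

  bool Run() override {
    // ... do some work on the queue currently executing us ...
    if (next_ != nullptr) {
      webrtc::TaskQueueBase* target = next_;
      next_ = nullptr;
      // Hand ownership to `target`; returning false tells the current
      // queue not to delete the task.
      target->PostTask(std::unique_ptr<webrtc::QueuedTask>(this));
      return false;
    }
    return true;  // Second run: let the executing queue delete the task.
  }

 private:
  webrtc::TaskQueueBase* next_;
};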
diff --git a/api/task_queue/task_queue_base.h b/api/task_queue/task_queue_base.h index 88419edd8f..b7c92f8647 100644 --- a/api/task_queue/task_queue_base.h +++ b/api/task_queue/task_queue_base.h @@ -11,6 +11,7 @@ #define API_TASK_QUEUE_TASK_QUEUE_BASE_H_ #include <memory> +#include <utility> #include "api/task_queue/queued_task.h" #include "rtc_base/system/rtc_export.h" @@ -24,6 +25,16 @@ namespace webrtc { // known task queue, use IsCurrent(). class RTC_LOCKABLE RTC_EXPORT TaskQueueBase { public: + enum class DelayPrecision { + // This may include up to a 17 ms leeway in addition to OS timer precision. + // See PostDelayedTask() for more information. + kLow, + // This does not have the additional delay that kLow has, but it is still + // limited by OS timer precision. See PostDelayedHighPrecisionTask() for + // more information. + kHigh, + }; + // Starts destruction of the task queue. // On return ensures no task are running and no new tasks are able to start // on the task queue. virtual void Delete() = 0; // Schedules a task to execute. Tasks are executed in FIFO order. - // If |task->Run()| returns true, task is deleted on the task queue + // If `task->Run()` returns true, task is deleted on the task queue // before next QueuedTask starts executing. // When a TaskQueue is deleted, pending tasks will not be executed but they // will be deleted. The deletion of tasks may happen synchronously on the // May be called on any thread or task queue, including this task queue. virtual void PostTask(std::unique_ptr<QueuedTask> task) = 0; + // Prefer PostDelayedTask() over PostDelayedHighPrecisionTask() whenever + // possible. + // // Schedules a task to execute a specified number of milliseconds from when - // the call is made. The precision should be considered as "best effort" - // and in some cases, such as on Windows when all high precision timers have - // been used up, can be off by as much as 15 millseconds. + // the call is made, using "low" precision. All scheduling is affected by + // OS-specific leeway and current workloads which means that in terms of + // precision there are no hard guarantees, but in addition to the OS induced + // leeway, "low" precision adds up to a 17 ms additional leeway. The purpose + // of this leeway is to achieve more efficient CPU scheduling and reduce Idle + // Wake Up frequency. + // + // The task may execute with [-1, 17 + OS induced leeway) ms additional delay. + // + // Avoid making assumptions about the precision of the OS scheduler. On macOS, + // the OS induced leeway may be 10% of sleep interval. On Windows, 1 ms + // precision timers may be used but there are cases, such as when running on + // battery, when the timer precision can be as poor as 15 ms. + // + // "Low" precision is not implemented everywhere yet. Where not yet + // implemented, PostDelayedTask() has "high" precision. See + // https://crbug.com/webrtc/13583 for more information. + // // May be called on any thread or task queue, including this task queue. virtual void PostDelayedTask(std::unique_ptr<QueuedTask> task, uint32_t milliseconds) = 0; + // Prefer PostDelayedTask() over PostDelayedHighPrecisionTask() whenever + // possible. + // + // Schedules a task to execute a specified number of milliseconds from when + // the call is made, using "high" precision. All scheduling is affected by + // OS-specific leeway and current workloads which means that in terms of + // precision there are no hard guarantees. + // + // The task may execute with [-1, OS induced leeway] ms additional delay. + // + // Avoid making assumptions about the precision of the OS scheduler. On macOS, + // the OS induced leeway may be 10% of sleep interval. On Windows, 1 ms + // precision timers may be used but there are cases, such as when running on + // battery, when the timer precision can be as poor as 15 ms. + // + // May be called on any thread or task queue, including this task queue. + virtual void PostDelayedHighPrecisionTask(std::unique_ptr<QueuedTask> task, + uint32_t milliseconds) { + // Remove default implementation when dependencies have implemented this + // method. + PostDelayedTask(std::move(task), milliseconds); + } + + // As specified by `precision`, calls either PostDelayedTask() or + // PostDelayedHighPrecisionTask(). + void PostDelayedTaskWithPrecision(DelayPrecision precision, + std::unique_ptr<QueuedTask> task, + uint32_t milliseconds) { + switch (precision) { + case DelayPrecision::kLow: + PostDelayedTask(std::move(task), milliseconds); + break; + case DelayPrecision::kHigh: + PostDelayedHighPrecisionTask(std::move(task), milliseconds); + break; + } + } + // Returns the task queue that is running the current thread. // Returns nullptr if this thread is not associated with any task queue. // May be called on any thread or task queue, including this task queue.
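A sketch of how a caller might plumb a precision requirement through the new dispatch helper (illustrative; it assumes the webrtc::ToQueuedTask lambda adapter from rtc_base/task_utils/to_queued_task.h, which is not part of this patch):

#include "api/task_queue/task_queue_base.h"
#include "rtc_base/task_utils/to_queued_task.h"

// Poll roughly every 250 ms; only pay for timer precision while animating.
void SchedulePoll(webrtc::TaskQueueBase* queue, bool animating) {
  auto precision = animating
                       ? webrtc::TaskQueueBase::DelayPrecision::kHigh
                       : webrtc::TaskQueueBase::DelayPrecision::kLow;
  // kLow may add up to ~17 ms of leeway on top of OS timer slack; kHigh
  // only pays the OS-induced leeway.
  queue->PostDelayedTaskWithPrecision(
      precision, webrtc::ToQueuedTask([] { /* poll stats */ }),
      /*milliseconds=*/250);
}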
diff --git a/api/task_queue/task_queue_test.cc b/api/task_queue/task_queue_test.cc index 0d411d2d9c..3458edb159 100644 --- a/api/task_queue/task_queue_test.cc +++ b/api/task_queue/task_queue_test.cc @@ -37,7 +37,7 @@ TEST_P(TaskQueueTest, PostAndCheckCurrent) { rtc::Event event; auto queue = CreateTaskQueue(factory, "PostAndCheckCurrent"); - // We're not running a task, so |queue| shouldn't be current. + // We're not running a task, so `queue` shouldn't be current. // Note that because rtc::Thread also supports the TQ interface and // TestMainImpl::Init wraps the main test thread (bugs.webrtc.org/9714), that // means that TaskQueueBase::Current() will still return a valid value. @@ -190,7 +190,7 @@ TEST_P(TaskQueueTest, PostAndReuse) { } TEST_P(TaskQueueTest, PostALot) { - // Waits until DecrementCount called |count| times. Thread safe. + // Waits until DecrementCount called `count` times. Thread safe. class BlockingCounter { public: explicit BlockingCounter(int initial_count) : count_(initial_count) {} diff --git a/api/task_queue/test/BUILD.gn b/api/task_queue/test/BUILD.gn new file mode 100644 index 0000000000..fa82dd090e --- /dev/null +++ b/api/task_queue/test/BUILD.gn @@ -0,0 +1,18 @@ +# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../../webrtc.gni") + +rtc_library("mock_task_queue_base") { testonly = true sources = [ "mock_task_queue_base.h" ] deps = [ "../../../api/task_queue:task_queue", "../../../test:test_support", ] } diff --git a/api/task_queue/test/mock_task_queue_base.h b/api/task_queue/test/mock_task_queue_base.h new file mode 100644 index 0000000000..68c5c052ed --- /dev/null +++ b/api/task_queue/test/mock_task_queue_base.h @@ -0,0 +1,32 @@ +/* + * Copyright 2022 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_TASK_QUEUE_TEST_MOCK_TASK_QUEUE_BASE_H_ +#define API_TASK_QUEUE_TEST_MOCK_TASK_QUEUE_BASE_H_ + +#include <memory> + +#include "api/task_queue/task_queue_base.h" +#include "test/gmock.h" + +namespace webrtc { + +class MockTaskQueueBase : public TaskQueueBase { + public: + MOCK_METHOD0(Delete, void()); + MOCK_METHOD1(PostTask, void(std::unique_ptr<QueuedTask>)); + MOCK_METHOD2(PostDelayedTask, void(std::unique_ptr<QueuedTask>, uint32_t)); + MOCK_METHOD2(PostDelayedHighPrecisionTask, + void(std::unique_ptr<QueuedTask>, uint32_t)); +}; + +} // namespace webrtc + +#endif // API_TASK_QUEUE_TEST_MOCK_TASK_QUEUE_BASE_H_ diff --git a/api/test/DEPS b/api/test/DEPS index 329076830c..6c3a9ebed1 100644 --- a/api/test/DEPS +++ b/api/test/DEPS @@ -16,6 +16,10 @@ specific_include_rules = { "+rtc_base/network.h", "+rtc_base/network_constants.h", ], + "peer_network_dependencies\.h": [ + "+rtc_base/network.h", + "+rtc_base/thread.h", + ], "peerconnection_quality_test_fixture\.h": [ "+logging/rtc_event_log/rtc_event_log_factory_interface.h", "+rtc_base/network.h", diff --git a/api/test/audio_quality_analyzer_interface.h b/api/test/audio_quality_analyzer_interface.h index c1044795d1..2eb7817445 100644 --- a/api/test/audio_quality_analyzer_interface.h +++ b/api/test/audio_quality_analyzer_interface.h @@ -25,9 +25,9 @@ class AudioQualityAnalyzerInterface : public StatsObserverInterface { ~AudioQualityAnalyzerInterface() override = default; // Will be called by the framework before the test. - // |test_case_name| is name of test case, that should be used to report all + // `test_case_name` is the name of the test case that should be used to report all // audio metrics. - // |analyzer_helper| is a pointer to a class that will allow track_id to + // `analyzer_helper` is a pointer to a class that will allow track_id to // stream_id matching. The caller is responsible for ensuring the // AnalyzerHelper outlives the instance of the AudioQualityAnalyzerInterface. virtual void Start(std::string test_case_name, diff --git a/api/test/audioproc_float.h b/api/test/audioproc_float.h index fec2ad11fa..1ef1c9828f 100644 --- a/api/test/audioproc_float.h +++ b/api/test/audioproc_float.h @@ -23,12 +23,12 @@ namespace test { // utility can be used to simulate the audioprocessing module using a recording // (either an AEC dump or wav files), and generate the output as a wav file. // Any audio_processing object specified in the input is used for the -// simulation. The optional |audio_processing| object provides the +// simulation. The optional `audio_processing` object provides the // AudioProcessing instance that is used during the simulation.
Note that when // the audio_processing object is specified all functionality that relies on // using the AudioProcessingBuilder is deactivated, since the AudioProcessing // object is already created and the builder is not used in the simulation. It -// is needed to pass the command line flags as |argc| and |argv|, so these can +// is needed to pass the command line flags as `argc` and `argv`, so these can // be interpreted properly by the utility. To see a list of all supported // command line flags, run the executable with the '--help' flag. int AudioprocFloat(rtc::scoped_refptr<AudioProcessing> audio_processing, @@ -38,10 +38,10 @@ int AudioprocFloat(rtc::scoped_refptr<AudioProcessing> audio_processing, // This is an interface for the audio processing simulation utility. This // utility can be used to simulate the audioprocessing module using a recording // (either an AEC dump or wav files), and generate the output as a wav file. -// The |ap_builder| object will be used to create the AudioProcessing instance -// that is used during the simulation. The |ap_builder| supports setting of +// The `ap_builder` object will be used to create the AudioProcessing instance +// that is used during the simulation. The `ap_builder` supports setting of // injectable components, which will be passed on to the created AudioProcessing -// instance. It is needed to pass the command line flags as |argc| and |argv|, +// instance. It is needed to pass the command line flags as `argc` and `argv`, // so these can be interpreted properly by the utility. // To get a fully-working audioproc_f utility, all that is needed is to write a // main function, create an AudioProcessingBuilder, optionally set custom @@ -56,9 +56,9 @@ int AudioprocFloat(std::unique_ptr<AudioProcessingBuilder> ap_builder, // Interface for the audio processing simulation utility, which is similar to // the one above, but which adds the option of receiving the input as a string // and returning the output as an array. The first three arguments fulfill the -// same purpose as above. Pass the |input_aecdump| to provide the content of an +// same purpose as above. Pass the `input_aecdump` to provide the content of an // AEC dump file as a string. After the simulation is completed, -// |processed_capture_samples| will contain the the samples processed on the +// `processed_capture_samples` will contain the samples processed on the // capture side. int AudioprocFloat(std::unique_ptr<AudioProcessingBuilder> ap_builder, int argc,
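Putting the builder-based overload above to work, a minimal audioproc_f-style entry point could look like this (a sketch; flag parsing and I/O are left entirely to the utility, and the AudioProcessingBuilder include path is the usual one in this tree):

#include <memory>

#include "api/test/audioproc_float.h"
#include "modules/audio_processing/include/audio_processing.h"

int main(int argc, char* argv[]) {
  // The simulator creates the AudioProcessing instance from this builder and
  // interprets the command line flags (input recording, output wav, ...).
  return webrtc::test::AudioprocFloat(
      std::make_unique<webrtc::AudioProcessingBuilder>(), argc, argv);
}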
diff --git a/api/test/compile_all_headers.cc b/api/test/compile_all_headers.cc index 5ecdcc1eb8..9275eb0791 100644 --- a/api/test/compile_all_headers.cc +++ b/api/test/compile_all_headers.cc @@ -32,6 +32,7 @@ #include "api/test/fake_frame_encryptor.h" #include "api/test/mock_async_dns_resolver.h" #include "api/test/mock_audio_mixer.h" +#include "api/test/mock_audio_sink.h" #include "api/test/mock_data_channel.h" #include "api/test/mock_frame_decryptor.h" #include "api/test/mock_frame_encryptor.h" @@ -48,3 +49,4 @@ #include "api/test/mock_video_decoder_factory.h" #include "api/test/mock_video_encoder.h" #include "api/test/mock_video_encoder_factory.h" +#include "api/test/mock_video_track.h" diff --git a/api/test/create_frame_generator.h b/api/test/create_frame_generator.h index 151414506c..cd4fcccd69 100644 --- a/api/test/create_frame_generator.h +++ b/api/test/create_frame_generator.h @@ -24,8 +24,8 @@ namespace test { // Creates a frame generator that produces frames with small squares that // move randomly towards the lower right corner. -// |type| has the default value FrameGeneratorInterface::OutputType::I420. -// |num_squares| has the default value 10. +// `type` has the default value FrameGeneratorInterface::OutputType::I420. +// `num_squares` has the default value 10. std::unique_ptr<FrameGeneratorInterface> CreateSquareFrameGenerator( int width, int height, @@ -66,7 +66,7 @@ CreateScrollingInputFromYuvFilesFrameGenerator( // Creates a frame generator that produces randomly generated slides. It fills // the frames with randomly sized and colored squares. -// |frame_repeat_count| determines how many times each slide is shown. +// `frame_repeat_count` determines how many times each slide is shown. std::unique_ptr<FrameGeneratorInterface> CreateSlideFrameGenerator(int width, int height, int frame_repeat_count);
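As a concrete use of the square-generator factory above (a sketch; the resolution and square count are arbitrary example values, passed through the optional `type` and `num_squares` parameters the comments describe):

#include <memory>

#include "api/test/create_frame_generator.h"
#include "api/test/frame_generator_interface.h"

std::unique_ptr<webrtc::test::FrameGeneratorInterface> MakeSquareInput() {
  // 640x360 I420 input with ten randomly moving squares.
  return webrtc::test::CreateSquareFrameGenerator(
      /*width=*/640, /*height=*/360,
      webrtc::test::FrameGeneratorInterface::OutputType::kI420,
      /*num_squares=*/10);
}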
diff --git a/api/test/create_peer_connection_quality_test_frame_generator.cc b/api/test/create_peer_connection_quality_test_frame_generator.cc index 7f0ba20c85..29eb41ca42 100644 --- a/api/test/create_peer_connection_quality_test_frame_generator.cc +++ b/api/test/create_peer_connection_quality_test_frame_generator.cc @@ -30,7 +30,7 @@ void ValidateScreenShareConfig(const VideoConfig& video_config, const ScreenShareConfig& screen_share_config) { if (screen_share_config.slides_yuv_file_names.empty()) { if (screen_share_config.scrolling_params) { - // If we have scrolling params, then its |source_width| and |source_heigh| + // If we have scrolling params, then its `source_width` and `source_height` // will be used as width and height of video input, so we have to validate // it against width and height of default input. RTC_CHECK_EQ(screen_share_config.scrolling_params->source_width, diff --git a/api/test/create_peer_connection_quality_test_frame_generator.h b/api/test/create_peer_connection_quality_test_frame_generator.h index ff87331204..ab3f65aa57 100644 --- a/api/test/create_peer_connection_quality_test_frame_generator.h +++ b/api/test/create_peer_connection_quality_test_frame_generator.h @@ -21,7 +21,7 @@ namespace webrtc { namespace webrtc_pc_e2e { // Creates a frame generator that produces frames with small squares that move -// randomly towards the lower right corner. |type| has the default value +// randomly towards the lower right corner. `type` has the default value // FrameGeneratorInterface::OutputType::I420. video_config specifies frame // width and height. std::unique_ptr<FrameGeneratorInterface> CreateSquareFrameGenerator( diff --git a/api/test/create_peerconnection_quality_test_fixture.h b/api/test/create_peerconnection_quality_test_fixture.h index 95b9ced5d2..a0b0d08dd4 100644 --- a/api/test/create_peerconnection_quality_test_fixture.h +++ b/api/test/create_peerconnection_quality_test_fixture.h @@ -25,10 +25,10 @@ namespace webrtc_pc_e2e { // Create test fixture to establish test call between Alice and Bob. // During the test Alice will be caller and Bob will answer the call. -// |test_case_name| is a name of test case, that will be used for all metrics +// `test_case_name` is the name of the test case that will be used for all metrics // reporting. -// |time_controller| is used to manage all rtc::Thread's and TaskQueue -// instances. Instance of |time_controller| have to outlive created fixture. +// `time_controller` is used to manage all rtc::Thread's and TaskQueue +// instances. The `time_controller` instance has to outlive the created fixture. // Returns a non-null PeerConnectionE2EQualityTestFixture instance. std::unique_ptr<PeerConnectionE2EQualityTestFixture> CreatePeerConnectionE2EQualityTestFixture( diff --git a/api/test/create_time_controller.h b/api/test/create_time_controller.h index 1b6896f2b4..e7bc9cb465 100644 --- a/api/test/create_time_controller.h +++ b/api/test/create_time_controller.h @@ -17,7 +17,7 @@ namespace webrtc { -// Creates a time coltroller that wraps |alarm|. +// Creates a time controller that wraps `alarm`. std::unique_ptr<TimeController> CreateTimeController( ControlledAlarmClock* alarm); diff --git a/api/test/dummy_peer_connection.h b/api/test/dummy_peer_connection.h index 80ae20c3c7..4a262564a6 100644 --- a/api/test/dummy_peer_connection.h +++ b/api/test/dummy_peer_connection.h @@ -45,9 +45,7 @@ class DummyPeerConnection : public PeerConnectionInterface { return RTCError(RTCErrorType::UNSUPPORTED_OPERATION, "Not implemented"); } - bool RemoveTrack(RtpSenderInterface* sender) override { return false; } - - RTCError RemoveTrackNew( + RTCError RemoveTrackOrError( rtc::scoped_refptr<RtpSenderInterface> sender) override { return RTCError(RTCErrorType::UNSUPPORTED_OPERATION, "Not implemented"); } diff --git a/api/test/frame_generator_interface.cc b/api/test/frame_generator_interface.cc index 356fe3af53..fe7b1e883d 100644 --- a/api/test/frame_generator_interface.cc +++ b/api/test/frame_generator_interface.cc @@ -26,7 +26,7 @@ const char* FrameGeneratorInterface::OutputTypeToString( case OutputType::kNV12: return "NV12"; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } } diff --git a/api/test/mock_async_dns_resolver.h b/api/test/mock_async_dns_resolver.h index e863cac6e6..7cc17a8427 100644 --- a/api/test/mock_async_dns_resolver.h +++ b/api/test/mock_async_dns_resolver.h @@ -24,8 +24,8 @@ class MockAsyncDnsResolverResult : public AsyncDnsResolverResult { MOCK_METHOD(bool, GetResolvedAddress, (int, rtc::SocketAddress*), - (const override)); - MOCK_METHOD(int, GetError, (), (const override)); + (const, override)); + MOCK_METHOD(int, GetError, (), (const, override)); }; class MockAsyncDnsResolver : public AsyncDnsResolverInterface { @@ -34,7 +34,7 @@ Start, (const rtc::SocketAddress&, std::function<void()>), (override)); - MOCK_METHOD(AsyncDnsResolverResult&, result, (), (const override)); + MOCK_METHOD(AsyncDnsResolverResult&, result, (), (const, override)); }; class MockAsyncDnsResolverFactory : public AsyncDnsResolverFactoryInterface { diff --git a/api/test/mock_audio_sink.h b/api/test/mock_audio_sink.h new file mode 100644 index 0000000000..0c17dc45ca --- /dev/null +++ b/api/test/mock_audio_sink.h @@ -0,0 +1,44 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree.
+ */ + +#ifndef API_TEST_MOCK_AUDIO_SINK_H_ +#define API_TEST_MOCK_AUDIO_SINK_H_ + +#include "absl/types/optional.h" +#include "api/media_stream_interface.h" +#include "test/gmock.h" + +namespace webrtc { + +class MockAudioSink final : public webrtc::AudioTrackSinkInterface { + public: + MOCK_METHOD(void, + OnData, + (const void* audio_data, + int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames), + (override)); + + MOCK_METHOD(void, + OnData, + (const void* audio_data, + int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + absl::optional absolute_capture_timestamp_ms), + (override)); +}; + +} // namespace webrtc + +#endif // API_TEST_MOCK_AUDIO_SINK_H_ diff --git a/api/test/mock_data_channel.h b/api/test/mock_data_channel.h index 9346ffd638..40f7edb08a 100644 --- a/api/test/mock_data_channel.h +++ b/api/test/mock_data_channel.h @@ -22,7 +22,8 @@ class MockDataChannelInterface final : public rtc::RefCountedObject { public: static rtc::scoped_refptr Create() { - return new MockDataChannelInterface(); + return rtc::scoped_refptr( + new MockDataChannelInterface()); } MOCK_METHOD(void, diff --git a/api/test/mock_media_stream_interface.h b/api/test/mock_media_stream_interface.h index 29521e6e23..17a30a877e 100644 --- a/api/test/mock_media_stream_interface.h +++ b/api/test/mock_media_stream_interface.h @@ -22,7 +22,7 @@ class MockAudioSource final : public rtc::RefCountedObject { public: static rtc::scoped_refptr Create() { - return new MockAudioSource(); + return rtc::scoped_refptr(new MockAudioSource()); } MOCK_METHOD(void, @@ -55,7 +55,7 @@ class MockAudioSource final class MockAudioTrack final : public rtc::RefCountedObject { public: static rtc::scoped_refptr Create() { - return new MockAudioTrack(); + return rtc::scoped_refptr(new MockAudioTrack()); } MOCK_METHOD(void, @@ -67,7 +67,7 @@ class MockAudioTrack final : public rtc::RefCountedObject { (ObserverInterface * observer), (override)); MOCK_METHOD(std::string, kind, (), (const, override)); - MOCK_METHOD(std::string, id, (), (const override)); + MOCK_METHOD(std::string, id, (), (const, override)); MOCK_METHOD(bool, enabled, (), (const, override)); MOCK_METHOD(bool, set_enabled, (bool enable), (override)); MOCK_METHOD(TrackState, state, (), (const, override)); diff --git a/api/test/mock_peer_connection_factory_interface.h b/api/test/mock_peer_connection_factory_interface.h index c2f2435fb8..6bab595b5a 100644 --- a/api/test/mock_peer_connection_factory_interface.h +++ b/api/test/mock_peer_connection_factory_interface.h @@ -23,7 +23,8 @@ class MockPeerConnectionFactoryInterface final : public rtc::RefCountedObject { public: static rtc::scoped_refptr Create() { - return new MockPeerConnectionFactoryInterface(); + return rtc::scoped_refptr( + new MockPeerConnectionFactoryInterface()); } MOCK_METHOD(void, SetOptions, (const Options&), (override)); @@ -47,11 +48,11 @@ class MockPeerConnectionFactoryInterface final MOCK_METHOD(RtpCapabilities, GetRtpSenderCapabilities, (cricket::MediaType), - (const override)); + (const, override)); MOCK_METHOD(RtpCapabilities, GetRtpReceiverCapabilities, (cricket::MediaType), - (const override)); + (const, override)); MOCK_METHOD(rtc::scoped_refptr, CreateLocalMediaStream, (const std::string&), diff --git a/api/test/mock_peerconnectioninterface.h b/api/test/mock_peerconnectioninterface.h index b5d94238c8..effd24e294 100644 --- a/api/test/mock_peerconnectioninterface.h +++ b/api/test/mock_peerconnectioninterface.h @@ -18,7 
+18,9 @@ #include #include "api/peer_connection_interface.h" +#include "api/scoped_refptr.h" #include "api/sctp_transport_interface.h" +#include "rtc_base/ref_counted_object.h" #include "test/gmock.h" namespace webrtc { @@ -26,6 +28,10 @@ namespace webrtc { class MockPeerConnectionInterface : public rtc::RefCountedObject { public: + static rtc::scoped_refptr Create() { + return rtc::make_ref_counted(); + } + // PeerConnectionInterface MOCK_METHOD(rtc::scoped_refptr, local_streams, @@ -42,9 +48,8 @@ class MockPeerConnectionInterface (rtc::scoped_refptr, const std::vector&), (override)); - MOCK_METHOD(bool, RemoveTrack, (RtpSenderInterface*), (override)); MOCK_METHOD(RTCError, - RemoveTrackNew, + RemoveTrackOrError, (rtc::scoped_refptr), (override)); MOCK_METHOD(RTCErrorOr>, @@ -71,15 +76,15 @@ class MockPeerConnectionInterface MOCK_METHOD(std::vector>, GetSenders, (), - (const override)); + (const, override)); MOCK_METHOD(std::vector>, GetReceivers, (), - (const override)); + (const, override)); MOCK_METHOD(std::vector>, GetTransceivers, (), - (const override)); + (const, override)); MOCK_METHOD(bool, GetStats, (StatsObserver*, MediaStreamTrackInterface*, StatsOutputLevel), @@ -99,7 +104,7 @@ class MockPeerConnectionInterface MOCK_METHOD(rtc::scoped_refptr, GetSctpTransport, (), - (const override)); + (const, override)); MOCK_METHOD(RTCErrorOr>, CreateDataChannelOrError, (const std::string&, const DataChannelInit*), @@ -107,27 +112,27 @@ class MockPeerConnectionInterface MOCK_METHOD(const SessionDescriptionInterface*, local_description, (), - (const override)); + (const, override)); MOCK_METHOD(const SessionDescriptionInterface*, remote_description, (), - (const override)); + (const, override)); MOCK_METHOD(const SessionDescriptionInterface*, current_local_description, (), - (const override)); + (const, override)); MOCK_METHOD(const SessionDescriptionInterface*, current_remote_description, (), - (const override)); + (const, override)); MOCK_METHOD(const SessionDescriptionInterface*, pending_local_description, (), - (const override)); + (const, override)); MOCK_METHOD(const SessionDescriptionInterface*, pending_remote_description, (), - (const override)); + (const, override)); MOCK_METHOD(void, RestartIce, (), (override)); MOCK_METHOD(void, CreateOffer, diff --git a/api/test/mock_rtp_transceiver.h b/api/test/mock_rtp_transceiver.h index a0a08c4772..5ea9028b77 100644 --- a/api/test/mock_rtp_transceiver.h +++ b/api/test/mock_rtp_transceiver.h @@ -23,7 +23,7 @@ class MockRtpTransceiver final : public rtc::RefCountedObject { public: static rtc::scoped_refptr Create() { - return new MockRtpTransceiver(); + return rtc::scoped_refptr(new MockRtpTransceiver()); } MOCK_METHOD(cricket::MediaType, media_type, (), (const, override)); @@ -70,6 +70,10 @@ class MockRtpTransceiver final HeaderExtensionsToOffer, (), (const, override)); + MOCK_METHOD(std::vector, + HeaderExtensionsNegotiated, + (), + (const, override)); MOCK_METHOD(webrtc::RTCError, SetOfferedRtpHeaderExtensions, (rtc::ArrayView diff --git a/api/test/mock_rtpreceiver.h b/api/test/mock_rtpreceiver.h index a0b79e0bed..4bcf064b2a 100644 --- a/api/test/mock_rtpreceiver.h +++ b/api/test/mock_rtpreceiver.h @@ -24,20 +24,20 @@ class MockRtpReceiver : public rtc::RefCountedObject { MOCK_METHOD(rtc::scoped_refptr, track, (), - (const override)); + (const, override)); MOCK_METHOD(std::vector>, streams, (), - (const override)); - MOCK_METHOD(cricket::MediaType, media_type, (), (const override)); - MOCK_METHOD(std::string, id, (), (const override)); - 
diff --git a/api/test/mock_rtp_transceiver.h b/api/test/mock_rtp_transceiver.h index a0a08c4772..5ea9028b77 100644 --- a/api/test/mock_rtp_transceiver.h +++ b/api/test/mock_rtp_transceiver.h @@ -23,7 +23,7 @@ class MockRtpTransceiver final : public rtc::RefCountedObject<webrtc::RtpTransceiverInterface> { public: static rtc::scoped_refptr<MockRtpTransceiver> Create() { - return new MockRtpTransceiver(); + return rtc::scoped_refptr<MockRtpTransceiver>(new MockRtpTransceiver()); } MOCK_METHOD(cricket::MediaType, media_type, (), (const, override)); @@ -70,6 +70,10 @@ class MockRtpTransceiver final HeaderExtensionsToOffer, (), (const, override)); + MOCK_METHOD(std::vector<RtpHeaderExtensionCapability>, + HeaderExtensionsNegotiated, + (), + (const, override)); MOCK_METHOD(webrtc::RTCError, SetOfferedRtpHeaderExtensions, (rtc::ArrayView<const RtpHeaderExtensionCapability> diff --git a/api/test/mock_rtpreceiver.h b/api/test/mock_rtpreceiver.h index a0b79e0bed..4bcf064b2a 100644 --- a/api/test/mock_rtpreceiver.h +++ b/api/test/mock_rtpreceiver.h @@ -24,20 +24,20 @@ class MockRtpReceiver : public rtc::RefCountedObject<RtpReceiverInterface> { MOCK_METHOD(rtc::scoped_refptr<MediaStreamTrackInterface>, track, (), - (const override)); + (const, override)); MOCK_METHOD(std::vector<rtc::scoped_refptr<MediaStreamInterface>>, streams, (), - (const override)); - MOCK_METHOD(cricket::MediaType, media_type, (), (const override)); - MOCK_METHOD(std::string, id, (), (const override)); - MOCK_METHOD(RtpParameters, GetParameters, (), (const override)); + (const, override)); + MOCK_METHOD(cricket::MediaType, media_type, (), (const, override)); + MOCK_METHOD(std::string, id, (), (const, override)); + MOCK_METHOD(RtpParameters, GetParameters, (), (const, override)); MOCK_METHOD(void, SetObserver, (RtpReceiverObserverInterface*), (override)); MOCK_METHOD(void, SetJitterBufferMinimumDelay, (absl::optional<double>), (override)); - MOCK_METHOD(std::vector<RtpSource>, GetSources, (), (const override)); + MOCK_METHOD(std::vector<RtpSource>, GetSources, (), (const, override)); }; } // namespace webrtc diff --git a/api/test/mock_rtpsender.h b/api/test/mock_rtpsender.h index f12a6185a6..e4d6399eed 100644 --- a/api/test/mock_rtpsender.h +++ b/api/test/mock_rtpsender.h @@ -25,21 +25,21 @@ class MockRtpSender : public rtc::RefCountedObject<RtpSenderInterface> { MOCK_METHOD(rtc::scoped_refptr<MediaStreamTrackInterface>, track, (), - (const override)); - MOCK_METHOD(uint32_t, ssrc, (), (const override)); - MOCK_METHOD(cricket::MediaType, media_type, (), (const override)); - MOCK_METHOD(std::string, id, (), (const override)); - MOCK_METHOD(std::vector<std::string>, stream_ids, (), (const override)); + (const, override)); + MOCK_METHOD(uint32_t, ssrc, (), (const, override)); + MOCK_METHOD(cricket::MediaType, media_type, (), (const, override)); + MOCK_METHOD(std::string, id, (), (const, override)); + MOCK_METHOD(std::vector<std::string>, stream_ids, (), (const, override)); MOCK_METHOD(std::vector<RtpEncodingParameters>, init_send_encodings, (), - (const override)); - MOCK_METHOD(RtpParameters, GetParameters, (), (const override)); + (const, override)); + MOCK_METHOD(RtpParameters, GetParameters, (), (const, override)); MOCK_METHOD(RTCError, SetParameters, (const RtpParameters&), (override)); MOCK_METHOD(rtc::scoped_refptr<DtmfSenderInterface>, GetDtmfSender, (), - (const override)); + (const, override)); }; } // namespace webrtc diff --git a/api/test/mock_transformable_video_frame.h b/api/test/mock_transformable_video_frame.h index 36798b5d73..5cebcaba80 100644 --- a/api/test/mock_transformable_video_frame.h +++ b/api/test/mock_transformable_video_frame.h @@ -21,9 +21,9 @@ namespace webrtc { class MockTransformableVideoFrame : public webrtc::TransformableVideoFrameInterface { public: - MOCK_METHOD(rtc::ArrayView<const uint8_t>, GetData, (), (const override)); + MOCK_METHOD(rtc::ArrayView<const uint8_t>, GetData, (), (const, override)); MOCK_METHOD(void, SetData, (rtc::ArrayView<const uint8_t> data), (override)); - MOCK_METHOD(uint32_t, GetTimestamp, (), (const override)); + MOCK_METHOD(uint32_t, GetTimestamp, (), (const, override)); MOCK_METHOD(uint32_t, GetSsrc, (), (const, override)); MOCK_METHOD(bool, IsKeyFrame, (), (const, override)); MOCK_METHOD(std::vector<uint8_t>, GetAdditionalData, (), (const, override));
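Most of the churn above is the same one-character fix. In MOCK_METHOD's fourth argument gMock expects a comma-separated list of specs, so const and override are two separate entries; the single token "const override" appears to have been tolerated by older gMock revisions but is not a valid spec list, which is why this patch normalizes every occurrence. Roughly:

// Correct: two specs, comma-separated.
MOCK_METHOD(std::string, id, (), (const, override));
// Not a valid spec list (one malformed entry):
// MOCK_METHOD(std::string, id, (), (const override));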
diff --git a/api/test/mock_video_decoder.h b/api/test/mock_video_decoder.h index faadabc4d7..b6d53f8d8d 100644 --- a/api/test/mock_video_decoder.h +++ b/api/test/mock_video_decoder.h @@ -37,10 +37,13 @@ class MockDecodedImageCallback : public DecodedImageCallback { class MockVideoDecoder : public VideoDecoder { public: - MOCK_METHOD(int32_t, - InitDecode, - (const VideoCodec* codec_settings, int32_t number_of_cores), - (override)); + MockVideoDecoder() { + // Make `Configure` succeed by default, so that individual tests that + // verify other methods don't need to stub `Configure`. + ON_CALL(*this, Configure).WillByDefault(testing::Return(true)); + } + + MOCK_METHOD(bool, Configure, (const Settings& settings), (override)); MOCK_METHOD(int32_t, Decode, (const EncodedImage& input_image, diff --git a/api/test/mock_video_encoder_factory.h b/api/test/mock_video_encoder_factory.h index 1aa14631be..79851096b7 100644 --- a/api/test/mock_video_encoder_factory.h +++ b/api/test/mock_video_encoder_factory.h @@ -28,10 +28,6 @@ class MockVideoEncoderFactory : public webrtc::VideoEncoderFactory { GetSupportedFormats, (), (const, override)); - MOCK_METHOD(CodecInfo, - QueryVideoEncoder, - (const SdpVideoFormat&), - (const, override)); MOCK_METHOD(std::unique_ptr<VideoEncoder>, CreateVideoEncoder, (const SdpVideoFormat&), diff --git a/api/test/mock_video_track.h b/api/test/mock_video_track.h new file mode 100644 index 0000000000..705d13509b --- /dev/null +++ b/api/test/mock_video_track.h @@ -0,0 +1,69 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_TEST_MOCK_VIDEO_TRACK_H_ +#define API_TEST_MOCK_VIDEO_TRACK_H_ + +#include <string> + +#include "api/media_stream_interface.h" +#include "api/scoped_refptr.h" +#include "rtc_base/ref_counted_object.h" +#include "test/gmock.h" + +namespace webrtc { + +class MockVideoTrack final + : public rtc::RefCountedObject<webrtc::VideoTrackInterface> { + public: + static rtc::scoped_refptr<MockVideoTrack> Create() { + return rtc::scoped_refptr<MockVideoTrack>(new MockVideoTrack()); + } + + // NotifierInterface + MOCK_METHOD(void, + RegisterObserver, + (ObserverInterface * observer), + (override)); + MOCK_METHOD(void, + UnregisterObserver, + (ObserverInterface * observer), + (override)); + + // MediaStreamTrackInterface + MOCK_METHOD(std::string, kind, (), (const, override)); + MOCK_METHOD(std::string, id, (), (const, override)); + MOCK_METHOD(bool, enabled, (), (const, override)); + MOCK_METHOD(bool, set_enabled, (bool enable), (override)); + MOCK_METHOD(TrackState, state, (), (const, override)); + + // VideoSourceInterface + MOCK_METHOD(void, + AddOrUpdateSink, + (rtc::VideoSinkInterface<VideoFrame>* sink, + const rtc::VideoSinkWants& wants), + (override)); + // RemoveSink must guarantee that at the time the method returns, + // there are no current and no future calls to VideoSinkInterface::OnFrame. + MOCK_METHOD(void, + RemoveSink, + (rtc::VideoSinkInterface<VideoFrame>* sink), + (override)); + + // VideoTrackInterface + MOCK_METHOD(VideoTrackSourceInterface*, GetSource, (), (const, override)); + + MOCK_METHOD(ContentHint, content_hint, (), (const, override)); + MOCK_METHOD(void, set_content_hint, (ContentHint hint), (override)); +}; + +} // namespace webrtc + +#endif // API_TEST_MOCK_VIDEO_TRACK_H_
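The ON_CALL in MockVideoDecoder's constructor installs a default action rather than an expectation, so Configure() returns true in any test that never mentions it. A sketch of the effect (the test body is illustrative):

MockVideoDecoder decoder;
// No stubbing needed: the constructor's ON_CALL makes Configure() succeed.
EXPECT_TRUE(decoder.Configure(VideoDecoder::Settings()));

// A test that exercises failure paths can still override the default:
EXPECT_CALL(decoder, Configure).WillOnce(testing::Return(false));
EXPECT_FALSE(decoder.Configure(VideoDecoder::Settings()));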
diff --git a/api/test/network_emulation/cross_traffic.h b/api/test/network_emulation/cross_traffic.h index 85343e44d2..737a93c2fd 100644 --- a/api/test/network_emulation/cross_traffic.h +++ b/api/test/network_emulation/cross_traffic.h @@ -27,12 +27,12 @@ class CrossTrafficRoute { public: virtual ~CrossTrafficRoute() = default; - // Triggers sending of dummy packets with size |packet_size| bytes. + // Triggers sending of dummy packets with size `packet_size` bytes. virtual void TriggerPacketBurst(size_t num_packets, size_t packet_size) = 0; // Sends a packet over the nodes. The content of the packet is unspecified; // only the size matters for the emulation purposes. virtual void SendPacket(size_t packet_size) = 0; - // Sends a packet over the nodes and runs |action| when it has been delivered. + // Sends a packet over the nodes and runs `action` when it has been delivered. virtual void NetworkDelayedAction(size_t packet_size, std::function<void()> action) = 0; }; diff --git a/api/test/network_emulation/network_emulation_interfaces.h b/api/test/network_emulation/network_emulation_interfaces.h index c8e6ed053e..735689c734 100644 --- a/api/test/network_emulation/network_emulation_interfaces.h +++ b/api/test/network_emulation/network_emulation_interfaces.h @@ -204,9 +204,9 @@ class EmulatedNetworkStats { class EmulatedEndpoint : public EmulatedNetworkReceiverInterface { public: // Send packet into network. - // |from| will be used to set source address for the packet in destination + // `from` will be used to set source address for the packet in destination // socket. - // |to| will be used for routing verification and picking right socket by port + // `to` will be used for routing verification and picking right socket by port // on destination endpoint. virtual void SendPacket(const rtc::SocketAddress& from, const rtc::SocketAddress& to, @@ -214,12 +214,12 @@ class EmulatedEndpoint : public EmulatedNetworkReceiverInterface { uint16_t application_overhead = 0) = 0; // Binds receiver to this endpoint to send and receive data. - // |desired_port| is a port that should be used. If it is equal to 0, + // `desired_port` is a port that should be used. If it is equal to 0, // endpoint will pick the first available port starting from - // |kFirstEphemeralPort|. + // `kFirstEphemeralPort`. // // Returns the port that should be used (it will be equal to the desired one - // if |desired_port| != 0 and is free or will be the one, selected by endpoint) + // if `desired_port` != 0 and is free, or will be the one selected by the endpoint) // or absl::nullopt if `desired_port` is already in use. Also fails if there are no more // free ports to bind to. // @@ -256,7 +256,7 @@ class EmulatedEndpoint : public EmulatedNetworkReceiverInterface { // they are guaranteed to be delivered eventually, even on lossy networks. class TcpMessageRoute { public: - // Sends a TCP message of the given |size| over the route, |on_received| is + // Sends a TCP message of the given `size` over the route; `on_received` is // called when the message has been delivered. Note that the connection // parameters are reset iff there's no currently pending message on the route. virtual void SendMessage(size_t size, std::function<void()> on_received) = 0;
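A sketch of the callback style these interfaces use, assuming `route` and `tcp_route` were obtained from a NetworkEmulationManager (see the manager API in the next file; all values are illustrative):

// Fire-and-forget burst of 10 dummy packets of 1200 bytes each.
route->TriggerPacketBurst(/*num_packets=*/10, /*packet_size=*/1200);

// Run code once a single packet has traversed the emulated path.
route->NetworkDelayedAction(/*packet_size=*/1200, [] {
  // Delivery-time logic goes here.
});

// Reliable message over the emulated TCP route; the callback fires on delivery.
tcp_route->SendMessage(/*size=*/100000, [] {
  // Message fully delivered.
});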
diff --git a/api/test/network_emulation_manager.h b/api/test/network_emulation_manager.h index ec51b290e0..b5c68af5f3 100644 --- a/api/test/network_emulation_manager.h +++ b/api/test/network_emulation_manager.h @@ -17,8 +17,10 @@ #include #include "api/array_view.h" +#include "api/packet_socket_factory.h" #include "api/test/network_emulation/cross_traffic.h" #include "api/test/network_emulation/network_emulation_interfaces.h" +#include "api/test/peer_network_dependencies.h" #include "api/test/simulated_network.h" #include "api/test/time_controller.h" #include "api/units/timestamp.h" @@ -125,12 +127,19 @@ class EmulatedNetworkManagerInterface { // WebRTC to properly setup network emulation. Returned manager is owned by // EmulatedNetworkManagerInterface implementation. virtual rtc::NetworkManager* network_manager() = 0; + // Returns non-null pointer to packet socket factory that has to be injected + // into WebRTC to properly setup network emulation. Returned factory is owned + // by EmulatedNetworkManagerInterface implementation. + virtual rtc::PacketSocketFactory* packet_socket_factory() = 0; + webrtc::webrtc_pc_e2e::PeerNetworkDependencies network_dependencies() { + return {network_thread(), network_manager(), packet_socket_factory()}; + } // Returns list of endpoints that are associated with this instance. Pointers // are guaranteed to be non-null and are owned by NetworkEmulationManager. virtual std::vector<EmulatedEndpoint*> endpoints() const = 0; // Passes summarized network stats for endpoints for this manager into - // specified |stats_callback|. Callback will be executed on network emulation + // specified `stats_callback`. Callback will be executed on network emulation // internal task queue. virtual void GetStats( std::function<void(std::unique_ptr<EmulatedNetworkStats>)> stats_callback) @@ -180,13 +189,13 @@ class NetworkEmulationManager { // Creates an emulated network node, which represents single network in // the emulated network layer. Uses default implementation of network behavior, - // which can be configured with |config|. |random_seed| can be provided to + // which can be configured with `config`. `random_seed` can be provided to // alter randomization behavior. virtual EmulatedNetworkNode* CreateEmulatedNode( BuiltInNetworkBehaviorConfig config, uint64_t random_seed = 1) = 0; // Creates an emulated network node, which represents single network in - // the emulated network layer. |network_behavior| determines how created node + // the emulated network layer. `network_behavior` determines how the created node // will forward incoming packets to the next receiver. virtual EmulatedNetworkNode* CreateEmulatedNode( std::unique_ptr<NetworkBehaviorInterface> network_behavior) = 0; @@ -205,8 +214,8 @@ class NetworkEmulationManager { // Creates a route between endpoints going through specified network nodes. // This route is single direction only and describes how traffic that was - // sent by network interface |from| have to be delivered to the network - // interface |to|. Return object can be used to remove created route. The + // sent by network interface `from` has to be delivered to the network + // interface `to`. Returned object can be used to remove the created route. The // route must contain at least one network node inside it. // // Assume that E{0-9} are endpoints and N{0-9} are network nodes, then @@ -228,7 +237,7 @@ const std::vector<EmulatedNetworkNode*>& via_nodes, EmulatedEndpoint* to) = 0; - // Creates a route over the given |via_nodes| creating the required endpoints + // Creates a route over the given `via_nodes` creating the required endpoints // in the process. The returned EmulatedRoute pointer can be used in other // calls as a transport route for message or cross traffic. virtual EmulatedRoute* CreateRoute( @@ -239,7 +248,7 @@ // packet's destination IP. // // This route is single direction only and describes how traffic that was - // sent by network interface |from| have to be delivered in case if routing + // sent by network interface `from` has to be delivered in case routing // was unspecified. Returned object can be used to remove the created route. The // route must contain at least one network node inside it. // @@ -269,29 +278,29 @@ // packets being dropped.
virtual void ClearRoute(EmulatedRoute* route) = 0; - // Creates a simulated TCP connection using |send_route| for traffic and - // |ret_route| for feedback. This can be used to emulate HTTP cross traffic + // Creates a simulated TCP connection using `send_route` for traffic and + // `ret_route` for feedback. This can be used to emulate HTTP cross traffic // and to implement realistic reliable signaling over lossy networks. // TODO(srte): Handle clearing of the routes involved. virtual TcpMessageRoute* CreateTcpRoute(EmulatedRoute* send_route, EmulatedRoute* ret_route) = 0; - // Creates a route over the given |via_nodes|. Returns an object that can be + // Creates a route over the given `via_nodes`. Returns an object that can be // used to emulate network load with cross traffic over the created route. virtual CrossTrafficRoute* CreateCrossTrafficRoute( const std::vector<EmulatedNetworkNode*>& via_nodes) = 0; - // Starts generating cross traffic using given |generator|. Takes ownership + // Starts generating cross traffic using given `generator`. Takes ownership // over the generator. virtual CrossTrafficGenerator* StartCrossTraffic( std::unique_ptr<CrossTrafficGenerator> generator) = 0; - // Stops generating cross traffic that was started using given |generator|. - // The |generator| shouldn't be used after and the reference may be invalid. + // Stops generating cross traffic that was started using given `generator`. + // The `generator` shouldn't be used afterwards and the reference may be invalid. virtual void StopCrossTraffic(CrossTrafficGenerator* generator) = 0; // Creates EmulatedNetworkManagerInterface which can then be used to inject - // network emulation layer into PeerConnection. |endpoints| - are available + // network emulation layer into PeerConnection. `endpoints` are the available // network interfaces for PeerConnection. If an endpoint is enabled, it will be // immediately available for PeerConnection, otherwise the user will be able to // enable the endpoint later to make it available for PeerConnection. @@ -299,8 +308,8 @@ CreateEmulatedNetworkManagerInterface( const std::vector<EmulatedEndpoint*>& endpoints) = 0; - // Passes summarized network stats for specified |endpoints| into specified - // |stats_callback|. Callback will be executed on network emulation + // Passes summarized network stats for specified `endpoints` into specified - // `stats_callback`. Callback will be executed on network emulation // internal task queue. virtual void GetStats( rtc::ArrayView<EmulatedEndpoint* const> endpoints,
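Taken together with the cross-traffic interfaces above, a rough end-to-end wiring sketch (the factory function comes from api/test/create_network_emulation_manager.h; config values and variable names are illustrative):

std::unique_ptr<NetworkEmulationManager> emulation =
    CreateNetworkEmulationManager();

// One lossy 100 ms node between two endpoints, routed in both directions.
BuiltInNetworkBehaviorConfig config;
config.queue_delay_ms = 100;
config.loss_percent = 1;
EmulatedNetworkNode* node = emulation->CreateEmulatedNode(config);
EmulatedEndpoint* alice_ep =
    emulation->CreateEndpoint(EmulatedEndpointConfig());
EmulatedEndpoint* bob_ep =
    emulation->CreateEndpoint(EmulatedEndpointConfig());
emulation->CreateRoute(alice_ep, {node}, bob_ep);
emulation->CreateRoute(bob_ep, {node}, alice_ep);

// Package everything a peer needs, using the new convenience accessor.
EmulatedNetworkManagerInterface* alice_net =
    emulation->CreateEmulatedNetworkManagerInterface({alice_ep});
webrtc_pc_e2e::PeerNetworkDependencies alice_deps =
    alice_net->network_dependencies();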
diff --git a/api/test/peer_network_dependencies.h b/api/test/peer_network_dependencies.h new file mode 100644 index 0000000000..6f85ad0a4d --- /dev/null +++ b/api/test/peer_network_dependencies.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_TEST_PEER_NETWORK_DEPENDENCIES_H_ +#define API_TEST_PEER_NETWORK_DEPENDENCIES_H_ + +#include "api/packet_socket_factory.h" +#include "rtc_base/network.h" +#include "rtc_base/thread.h" + +namespace webrtc { +namespace webrtc_pc_e2e { + +// The network dependencies needed when adding a peer to tests using +// PeerConnectionE2EQualityTestFixture. +struct PeerNetworkDependencies { + rtc::Thread* network_thread; + rtc::NetworkManager* network_manager; + rtc::PacketSocketFactory* packet_socket_factory; +}; + +} // namespace webrtc_pc_e2e +} // namespace webrtc + +#endif // API_TEST_PEER_NETWORK_DEPENDENCIES_H_ diff --git a/api/test/peerconnection_quality_test_fixture.h b/api/test/peerconnection_quality_test_fixture.h index 8717e8f73d..198868d7a7 100644 --- a/api/test/peerconnection_quality_test_fixture.h +++ b/api/test/peerconnection_quality_test_fixture.h @@ -30,6 +30,7 @@ #include "api/task_queue/task_queue_factory.h" #include "api/test/audio_quality_analyzer_interface.h" #include "api/test/frame_generator_interface.h" +#include "api/test/peer_network_dependencies.h" #include "api/test/simulated_network.h" #include "api/test/stats_observer_interface.h" #include "api/test/track_id_stream_info_map.h" @@ -67,17 +68,17 @@ class PeerConnectionE2EQualityTestFixture { // bottom right corner of the picture. // // In such case the source dimensions must be greater than or equal to the sliding - // window dimensions. So |source_width| and |source_height| are the dimensions - // of the source frame, while |VideoConfig::width| and |VideoConfig::height| + // window dimensions. So `source_width` and `source_height` are the dimensions + // of the source frame, while `VideoConfig::width` and `VideoConfig::height` // are the dimensions of the sliding window. // - // Because |source_width| and |source_height| are dimensions of the source + // Because `source_width` and `source_height` are dimensions of the source // frame, they have to be the width and height of videos from - // |ScreenShareConfig::slides_yuv_file_names|. + // `ScreenShareConfig::slides_yuv_file_names`. // // Because scrolling has to be done on a single slide, it also requires that - // |duration| must be less or equal to - // |ScreenShareConfig::slide_change_interval|. + // `duration` be less than or equal to + // `ScreenShareConfig::slide_change_interval`. struct ScrollingParams { ScrollingParams(TimeDelta duration, size_t source_width, @@ -110,16 +111,16 @@ // will be applied in such case. bool generate_slides = false; // If present, scrolling will be applied. Please read the extra requirement on - // |slides_yuv_file_names| for scrolling. + // `slides_yuv_file_names` for scrolling. absl::optional<ScrollingParams> scrolling_params; // Contains list of yuv files with slides. // // If empty, default set of slides will be used. In such case - // |VideoConfig::width| must be equal to |kDefaultSlidesWidth| and - // |VideoConfig::height| must be equal to |kDefaultSlidesHeight| or if - // |scrolling_params| are specified, then |ScrollingParams::source_width| - // must be equal to |kDefaultSlidesWidth| and - // |ScrollingParams::source_height| must be equal to |kDefaultSlidesHeight|. + // `VideoConfig::width` must be equal to `kDefaultSlidesWidth` and + // `VideoConfig::height` must be equal to `kDefaultSlidesHeight` or, if + // `scrolling_params` are specified, then `ScrollingParams::source_width` + // must be equal to `kDefaultSlidesWidth` and + // `ScrollingParams::source_height` must be equal to `kDefaultSlidesHeight`. std::vector<std::string> slides_yuv_file_names; }; @@ -128,7 +129,7 @@ // SVC support is limited: // During SVC testing there is no SFU, so the framework will try to emulate SFU // behavior in a regular p2p call.
Because of this, there are some limitations: - // * if |target_spatial_index| is not equal to the highest spatial layer + // * if `target_spatial_index` is not equal to the highest spatial layer // then no packet/frame drops are allowed. // // If there are any drops that affect the requested layer, then @@ -154,11 +155,11 @@ // Specifies spatial index of the video stream to analyze. // There are 2 cases: // 1. simulcast encoder is used: - // in such case |target_spatial_index| will specify the index of + // in such case `target_spatial_index` will specify the index of the // simulcast stream that should be analyzed. Other streams will be // dropped. // 2. SVC encoder is used: - // in such case |target_spatial_index| will specify the top interesting + // in such case `target_spatial_index` will specify the top interesting // spatial layer; all layers below, including the target one, will be // processed, and all layers above the target one will be dropped. // If not specified, then whatever stream is received will be analyzed. @@ -166,8 +167,8 @@ // network. absl::optional<int> target_spatial_index; - // Encoding parameters per simulcast layer. If not empty, |encoding_params| - // size have to be equal to |simulcast_streams_count|. Will be used to set + // Encoding parameters per simulcast layer. If not empty, `encoding_params` + // size has to be equal to `simulcast_streams_count`. Will be used to set // transceiver send encoding params for simulcast layers. Applicable only // for codecs that support simulcast (e.g. Vp8) and will be ignored // otherwise. RtpEncodingParameters::rid may be changed by fixture @@ -179,6 +180,14 @@ struct VideoConfig { VideoConfig(size_t width, size_t height, int32_t fps) : width(width), height(height), fps(fps) {} + VideoConfig(std::string stream_label, + size_t width, + size_t height, + int32_t fps) + : width(width), + height(height), + fps(fps), + stream_label(std::move(stream_label)) {} // Video stream width. const size_t width; @@ -220,16 +229,38 @@ // was captured during the test for this video stream on sender side. // It is useful when a generator is used as input. absl::optional<std::string> input_dump_file_name; - // Used only if |input_dump_file_name| is set. Specifies the modulo for the + // Used only if `input_dump_file_name` is set. Specifies the modulo for the // video frames to be dumped. Modulo equals X means every Xth frame will be // written to the dump file. The value must be greater than 0. int input_dump_sampling_modulo = 1; // If specified this file will be used as output on the receiver side for - // this stream. If multiple streams will be produced by input stream, - // output files will be appended with indexes. The produced files contains - // what was rendered for this video stream on receiver side. + // this stream. + // + // If multiple output streams are produced by this stream (e.g. when the + // stream represented by this `VideoConfig` is received by more than one + // peer), output files will be appended with receiver names. If the second + // and subsequent receivers are added in the middle of the call, after the + // first frame for this stream has already been written to the output file, + // then only the dumps for newly added peers will be appended with the receiver + // name; the dump for the first receiver will have a name equal to the + // specified one.
For example: + // * If we have peers A and B and A has `VideoConfig` V_a with + // V_a.output_dump_file_name = "/foo/a_output.yuv", then the stream + // related to V_a will be written into "/foo/a_output.yuv". + // * If we have peers A, B and C and A has `VideoConfig` V_a with + // V_a.output_dump_file_name = "/foo/a_output.yuv", then the stream + // related to V_a will be written for peer B into "/foo/a_output.yuv.B" + // and for peer C into "/foo/a_output.yuv.C" + // * If we have peers A and B and A has `VideoConfig` V_a with + // V_a.output_dump_file_name = "/foo/a_output.yuv", then if peer C joined + // the call after B received the first frame related to V_a, the stream + // related to V_a will be written for peer B into + // "/foo/a_output.yuv" and for peer C into "/foo/a_output.yuv.C" + // + // The produced files contain what was rendered for this video stream on + // the receiver side. absl::optional<std::string> output_dump_file_name; - // Used only if |output_dump_file_name| is set. Specifies the modulo for the + // Used only if `output_dump_file_name` is set. Specifies the modulo for the // video frames to be dumped. Modulo equals X means every Xth frame will be // written to the dump file. The value must be greater than 0. int output_dump_sampling_modulo = 1; @@ -247,6 +278,11 @@ class PeerConnectionE2EQualityTestFixture { kGenerated, kFile, }; + + AudioConfig() = default; + explicit AudioConfig(std::string stream_label) + : stream_label(std::move(stream_label)) {} + // Has to be unique among all specified configs for all peers in the call. // Will be auto generated if omitted. absl::optional<std::string> stream_label; @@ -268,6 +304,27 @@ absl::optional<std::string> sync_group; }; + struct VideoCodecConfig { + explicit VideoCodecConfig(std::string name) + : name(std::move(name)), required_params() {} + VideoCodecConfig(std::string name, + std::map<std::string, std::string> required_params) + : name(std::move(name)), required_params(std::move(required_params)) {} + // The next two fields specify the concrete video codec that should be + // used in the test. The video codec will be negotiated in SDP during offer/ + // answer exchange. + // Video codec name. You can find valid names in + // media/base/media_constants.h + std::string name = cricket::kVp8CodecName; + // Map of parameters that have to be specified on the SDP codec. Each parameter + // is described by key and value. Codec parameters will match the specified + // map if and only if for each key from `required_params` there is + // a parameter with a name equal to this key and a value equal + // to the value from `required_params` for this key. + // If empty, then only the name will be used to match the codec. + std::map<std::string, std::string> required_params; + }; +
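A sketch of naming a codec with required SDP parameters (cricket::kVp9CodecName comes from media/base/media_constants.h; the profile-id value is illustrative):

using VideoCodecConfig =
    PeerConnectionE2EQualityTestFixture::VideoCodecConfig;

// Matches only a VP9 codec entry whose SDP carries profile-id=2.
VideoCodecConfig vp9_profile2(cricket::kVp9CodecName,
                              {{"profile-id", "2"}});
// Plain VP8, matched by name alone.
VideoCodecConfig vp8(cricket::kVp8CodecName);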
// This class is used to fully configure one peer inside the call. class PeerConfigurer { public: @@ -328,9 +385,33 @@ virtual PeerConfigurer* AddVideoConfig( VideoConfig config, CapturingDeviceIndex capturing_device_index) = 0; + // Set the list of video codecs used by the peer during the test. These + // codecs will be negotiated in SDP during offer/answer exchange. The order + // of these codecs during negotiation will be the same as in `video_codecs`. + // Codecs have to be available in the codecs list provided by the peer + // connection to be negotiated. If some of the specified codecs aren't found, + // the test will crash. See the configuration sketch after this declaration block. + virtual PeerConfigurer* SetVideoCodecs( + std::vector<VideoCodecConfig> video_codecs) = 0; // Set the audio stream for the call from this peer. If this method is not // invoked, this peer will send no audio. virtual PeerConfigurer* SetAudioConfig(AudioConfig config) = 0; + + // Set if ULP FEC should be used or not. False by default. + virtual PeerConfigurer* SetUseUlpFEC(bool value) = 0; + // Set if Flex FEC should be used or not. False by default. + // The client must also enable `enable_flex_fec_support` in the `RunParams` to + // be able to use this feature. + virtual PeerConfigurer* SetUseFlexFEC(bool value) = 0; + // Specifies how much the video encoder target bitrate should differ from the + // target bitrate provided by the WebRTC stack. Must be greater than 0. Can be + // used to emulate overshooting of video encoders. This multiplier will + // be applied to all video encoders on both sides for all layers. The bitrate + // estimated by the WebRTC stack will be multiplied by this multiplier and then + // passed to VideoEncoder::SetRates(...). 1.0 by default. + virtual PeerConfigurer* SetVideoEncoderBitrateMultiplier( + double multiplier) = 0; + // If set, an RTCEventLog will be saved to that location and it will be // available for further analysis. virtual PeerConfigurer* SetRtcEventLogPath(std::string path) = 0;
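A configuration sketch using these setters inside an AddPeer() callback (AddPeer and RunParams appear later in this header; the values are illustrative):

fixture->AddPeer(alice_net->network_dependencies(),
                 [](PeerConfigurer* alice) {
  alice->SetVideoCodecs({VideoCodecConfig(cricket::kVp8CodecName)});
  alice->AddVideoConfig(VideoConfig("alice_video", 1280, 720, 30));
  alice->SetUseFlexFEC(true);  // Also needs RunParams::enable_flex_fec_support.
  alice->SetVideoEncoderBitrateMultiplier(1.1);
});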
Bitrate - // estimated by WebRTC stack will be multiplied on this multiplier and then - // provided into VideoEncoder::SetRates(...). - double video_encoder_bitrate_multiplier = 1.0; + // If set to true peers will be able to use Flex FEC, otherwise they won't + // be able to negotiate it even if it's enabled on per peer level. + bool enable_flex_fec_support = false; // If true will set conference mode in SDP media section for all video // tracks for all peers. bool use_conference_mode = false; @@ -416,9 +462,9 @@ class PeerConnectionE2EQualityTestFixture { // Invoked by framework after peer connection factory and peer connection // itself will be created but before offer/answer exchange will be started. - // |test_case_name| is name of test case, that should be used to report all + // `test_case_name` is name of test case, that should be used to report all // metrics. - // |reporter_helper| is a pointer to a class that will allow track_id to + // `reporter_helper` is a pointer to a class that will allow track_id to // stream_id matching. The caller is responsible for ensuring the // TrackIdStreamInfoMap will be valid from Start() to // StopAndReportResults(). @@ -430,17 +476,24 @@ class PeerConnectionE2EQualityTestFixture { virtual void StopAndReportResults() = 0; }; + // Represents single participant in call and can be used to perform different + // in-call actions. Might be extended in future. + class PeerHandle { + public: + virtual ~PeerHandle() = default; + }; + virtual ~PeerConnectionE2EQualityTestFixture() = default; // Add activity that will be executed on the best effort at least after - // |target_time_since_start| after call will be set up (after offer/answer + // `target_time_since_start` after call will be set up (after offer/answer // exchange, ICE gathering will be done and ICE candidates will passed to - // remote side). |func| param is amount of time spent from the call set up. + // remote side). `func` param is amount of time spent from the call set up. virtual void ExecuteAt(TimeDelta target_time_since_start, std::function func) = 0; - // Add activity that will be executed every |interval| with first execution - // on the best effort at least after |initial_delay_since_start| after call - // will be set up (after all participants will be connected). |func| param is + // Add activity that will be executed every `interval` with first execution + // on the best effort at least after `initial_delay_since_start` after call + // will be set up (after all participants will be connected). `func` param is // amount of time spent from the call set up. virtual void ExecuteEvery(TimeDelta initial_delay_since_start, TimeDelta interval, @@ -452,15 +505,15 @@ class PeerConnectionE2EQualityTestFixture { // Add a new peer to the call and return an object through which caller // can configure peer's behavior. - // |network_thread| will be used as network thread for peer's peer connection - // |network_manager| will be used to provide network interfaces for peer's - // peer connection. - // |configurer| function will be used to configure peer in the call. - virtual void AddPeer(rtc::Thread* network_thread, - rtc::NetworkManager* network_manager, - rtc::FunctionView configurer) = 0; + // `network_dependencies` are used to provide networking for peer's peer + // connection. Members must be non-null. + // `configurer` function will be used to configure peer in the call. 
@@ -452,15 +505,15 @@ // Add a new peer to the call and return an object through which the caller // can configure the peer's behavior. - |network_thread| will be used as network thread for peer's peer connection - |network_manager| will be used to provide network interfaces for peer's - peer connection. - |configurer| function will be used to configure peer in the call. - virtual void AddPeer(rtc::Thread* network_thread, - rtc::NetworkManager* network_manager, - rtc::FunctionView<void(PeerConfigurer*)> configurer) = 0; + // `network_dependencies` are used to provide networking for the peer's peer + // connection. Members must be non-null. + // The `configurer` function will be used to configure the peer in the call. + virtual PeerHandle* AddPeer( + const PeerNetworkDependencies& network_dependencies, + rtc::FunctionView<void(PeerConfigurer*)> configurer) = 0; + // Runs the media quality test, which includes setting up the call with - configured participants, running it according to provided |run_params| and + // configured participants, running it according to the provided `run_params` and // terminating it properly at the end. During the call duration media quality // metrics are gathered, which are then reported to stdout and (if configured) // to the json/protobuf output file through the WebRTC perf test results diff --git a/api/test/stats_observer_interface.h b/api/test/stats_observer_interface.h index ea4d6c23db..58d8f52d77 100644 --- a/api/test/stats_observer_interface.h +++ b/api/test/stats_observer_interface.h @@ -23,7 +23,7 @@ class StatsObserverInterface { virtual ~StatsObserverInterface() = default; // Method called when stats reports are available for the PeerConnection - // identified by |pc_label|. + // identified by `pc_label`. virtual void OnStatsReports( absl::string_view pc_label, const rtc::scoped_refptr<const RTCStatsReport>& report) = 0; diff --git a/api/test/time_controller.h b/api/test/time_controller.h index bd3192ddf2..17aa0db80f 100644 --- a/api/test/time_controller.h +++ b/api/test/time_controller.h @@ -44,7 +44,7 @@ class TimeController { // Creates a process thread. virtual std::unique_ptr<ProcessThread> CreateProcessThread( const char* thread_name) = 0; - // Creates an rtc::Thread instance. If |socket_server| is nullptr, a default + // Creates an rtc::Thread instance. If `socket_server` is nullptr, a default // noop socket server is created. // Returned thread is not null and started. virtual std::unique_ptr<rtc::Thread> CreateThread( @@ -55,12 +55,12 @@ // thread. virtual rtc::Thread* GetMainThread() = 0; // Allow task queues and process threads created by this instance to execute - // for the given |duration|. + // for the given `duration`. virtual void AdvanceTime(TimeDelta duration) = 0; // Waits until condition() == true, polling condition() in small time // intervals. - // Returns true if condition() was evaluated to true before |max_duration| + // Returns true if condition() was evaluated to true before `max_duration` // elapsed and false otherwise. bool Wait(const std::function<bool()>& condition, TimeDelta max_duration = TimeDelta::Seconds(5)); @@ -75,17 +75,17 @@ class ControlledAlarmClock { // Gets a clock that tells the alarm clock's notion of time. virtual Clock* GetClock() = 0; - // Schedules the alarm to fire at |deadline|. - // An alarm clock only supports one deadline. Calls to |ScheduleAlarmAt| with + // Schedules the alarm to fire at `deadline`. + // An alarm clock only supports one deadline. Calls to `ScheduleAlarmAt` with // an earlier deadline will reset the alarm to fire earlier. Calls to - // |ScheduleAlarmAt| with a later deadline are ignored. Returns true if the + // `ScheduleAlarmAt` with a later deadline are ignored. Returns true if the // deadline changed, false otherwise. virtual bool ScheduleAlarmAt(Timestamp deadline) = 0; // Sets the callback that should be run when the alarm fires. virtual void SetCallback(std::function<void()> callback) = 0; - // Waits for |duration| to pass, according to the alarm clock. + // Waits for `duration` to pass, according to the alarm clock. virtual void Sleep(TimeDelta duration) = 0; };
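The Wait() helper polls under the controller's notion of time, which makes it safe with simulated clocks. A sketch (the async work is assumed, not shown):

bool done = false;
// ... start asynchronous work that eventually sets `done` = true ...
ASSERT_TRUE(time_controller->Wait([&] { return done; },
                                  /*max_duration=*/TimeDelta::Seconds(5)));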
diff --git a/api/test/track_id_stream_info_map.h b/api/test/track_id_stream_info_map.h index bb73cfd997..0f8e43e20e 100644 --- a/api/test/track_id_stream_info_map.h +++ b/api/test/track_id_stream_info_map.h @@ -16,7 +16,7 @@ namespace webrtc { namespace webrtc_pc_e2e { -// Instances of |TrackIdStreamInfoMap| provide bookkeeping capabilities that +// Instances of `TrackIdStreamInfoMap` provide bookkeeping capabilities that // are useful for associating stats report track_ids with remote stream info. class TrackIdStreamInfoMap { public: @@ -26,12 +26,12 @@ // StatsObserverInterface::OnStatsReports is invoked. // Returns a reference to a stream label owned by the TrackIdStreamInfoMap. - // Precondition: |track_id| must be already mapped to stream label. + // Precondition: `track_id` must already be mapped to a stream label. virtual absl::string_view GetStreamLabelFromTrackId( absl::string_view track_id) const = 0; // Returns a reference to a sync group name owned by the TrackIdStreamInfoMap. - // Precondition: |track_id| must be already mapped to sync group. + // Precondition: `track_id` must already be mapped to a sync group. virtual absl::string_view GetSyncGroupLabelFromTrackId( absl::string_view track_id) const = 0; }; diff --git a/api/test/video/function_video_encoder_factory.h b/api/test/video/function_video_encoder_factory.h index a452eee7c4..9ae9719916 100644 --- a/api/test/video/function_video_encoder_factory.h +++ b/api/test/video/function_video_encoder_factory.h @@ -39,7 +39,7 @@ class FunctionVideoEncoderFactory final : public VideoEncoderFactory { // Unused by tests. std::vector<SdpVideoFormat> GetSupportedFormats() const override { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return {}; } diff --git a/api/test/video_quality_analyzer_interface.h b/api/test/video_quality_analyzer_interface.h index 4488e5abf4..d27c9ea015 100644 --- a/api/test/video_quality_analyzer_interface.h +++ b/api/test/video_quality_analyzer_interface.h @@ -23,7 +23,6 @@ #include "api/video_codecs/video_encoder.h" namespace webrtc { -namespace webrtc_pc_e2e { // API is in development and can be changed without notice. // | Sink | | Stack | | Decoder | // ¯¯¯¯¯¯¯ ¯¯¯¯¯¯¯¯ ¯¯¯¯¯¯¯¯¯ // The analyzer will be injected in all points from A to F. -class VideoQualityAnalyzerInterface : public StatsObserverInterface { +class VideoQualityAnalyzerInterface + : public webrtc_pc_e2e::StatsObserverInterface { public: // Contains extra statistic provided by video encoder. struct EncoderStats { + std::string encoder_name = "unknown"; // TODO(hbos) https://crbug.com/webrtc/9547, // https://crbug.com/webrtc/11443: improve stats API to make available // there. - uint32_t target_encode_bitrate; + uint32_t target_encode_bitrate = 0; }; // Contains extra statistic provided by video decoder. struct DecoderStats { + std::string decoder_name = "unknown"; // Decode time provided by the decoder itself. If the decoder doesn't produce // such information, it can be omitted. - absl::optional<int32_t> decode_time_ms; + absl::optional<int32_t> decode_time_ms = absl::nullopt; }; ~VideoQualityAnalyzerInterface() override = default; // Will be called by framework before test. - // |test_case_name| is name of test case, that should be used to report all + // `test_case_name` is the name of the test case that should be used to report all // video metrics.
- // |threads_count| is number of threads that analyzer can use for heavy + // `threads_count` is the number of threads that the analyzer can use for heavy // calculations. Analyzer can perform simple calculations on the calling // thread in each method, but should remember that it is the same thread // that is used in the video pipeline. @@ -83,57 +85,57 @@ class VideoQualityAnalyzerInterface : public StatsObserverInterface { int max_threads_count) {} // Will be called when a frame was generated from the input stream. - // |peer_name| is name of the peer on which side frame was captured. + // `peer_name` is the name of the peer on whose side the frame was captured. // Returns the frame id that will be set by the framework on the frame. virtual uint16_t OnFrameCaptured(absl::string_view peer_name, const std::string& stream_label, const VideoFrame& frame) = 0; // Will be called before calling the encoder. - // |peer_name| is name of the peer on which side frame came to encoder. + // `peer_name` is the name of the peer on whose side the frame came to the encoder. virtual void OnFramePreEncode(absl::string_view peer_name, const VideoFrame& frame) {} // Will be called for each EncodedImage received from the encoder. A single // VideoFrame can produce multiple EncodedImages. Each encoded image will // have the id from the VideoFrame. - // |peer_name| is name of the peer on which side frame was encoded. + // `peer_name` is the name of the peer on whose side the frame was encoded. virtual void OnFrameEncoded(absl::string_view peer_name, uint16_t frame_id, const EncodedImage& encoded_image, const EncoderStats& stats) {} // Will be called for each frame dropped by the encoder. - // |peer_name| is name of the peer on which side frame drop was detected. + // `peer_name` is the name of the peer on whose side the frame drop was detected. virtual void OnFrameDropped(absl::string_view peer_name, EncodedImageCallback::DropReason reason) {} // Will be called before calling the decoder. - // |peer_name| is name of the peer on which side frame was received. + // `peer_name` is the name of the peer on whose side the frame was received. virtual void OnFramePreDecode(absl::string_view peer_name, uint16_t frame_id, const EncodedImage& encoded_image) {} // Will be called after decoding the frame. - // |peer_name| is name of the peer on which side frame was decoded. + // `peer_name` is the name of the peer on whose side the frame was decoded. virtual void OnFrameDecoded(absl::string_view peer_name, const VideoFrame& frame, const DecoderStats& stats) {} // Will be called when a frame is obtained from the PeerConnection stack. - // |peer_name| is name of the peer on which side frame was rendered. + // `peer_name` is the name of the peer on whose side the frame was rendered. virtual void OnFrameRendered(absl::string_view peer_name, const VideoFrame& frame) {} // Will be called if the encoder returns something other than WEBRTC_VIDEO_CODEC_OK. // All available codes are listed in // modules/video_coding/include/video_error_codes.h - // |peer_name| is name of the peer on which side error acquired. + // `peer_name` is the name of the peer on whose side the error occurred. virtual void OnEncoderError(absl::string_view peer_name, const VideoFrame& frame, int32_t error_code) {} // Will be called if the decoder returns something other than WEBRTC_VIDEO_CODEC_OK. // All available codes are listed in // modules/video_coding/include/video_error_codes.h - // |peer_name| is name of the peer on which side error acquired. + // `peer_name` is the name of the peer on whose side the error occurred.
virtual void OnDecoderError(absl::string_view peer_name, uint16_t frame_id, int32_t error_code) {} // Will be called every time new stats reports are available for the - // Peer Connection identified by |pc_label|. + // Peer Connection identified by `pc_label`. void OnStatsReports( absl::string_view pc_label, const rtc::scoped_refptr<const RTCStatsReport>& report) override {} @@ -145,9 +147,17 @@ // statistics. virtual void Stop() {} + // Returns the last stream where this frame was captured. This means that if + // the frame id space wraps around, the stream label for a frame id may change. + // It will crash if the specified `frame_id` wasn't captured. virtual std::string GetStreamLabel(uint16_t frame_id) = 0; }; +namespace webrtc_pc_e2e { + +// Temporary alias so that downstream projects are able to migrate. +using VideoQualityAnalyzerInterface = ::webrtc::VideoQualityAnalyzerInterface; + } // namespace webrtc_pc_e2e } // namespace webrtc diff --git a/api/test/video_quality_test_fixture.h b/api/test/video_quality_test_fixture.h index 92c398aa54..08ae12b816 100644 --- a/api/test/video_quality_test_fixture.h +++ b/api/test/video_quality_test_fixture.h @@ -98,7 +98,7 @@ class VideoQualityTestFixtureInterface { InterLayerPredMode inter_layer_pred = InterLayerPredMode::kOn; // If empty, bitrates are generated in VP9Impl automatically. std::vector<SpatialLayer> spatial_layers; - // If set, default parameters will be used instead of |streams|. + // If set, default parameters will be used instead of `streams`. bool infer_streams = false; } ss[2]; struct Logging { diff --git a/api/test/videocodec_test_fixture.h b/api/test/videocodec_test_fixture.h index e0f804fe46..dbf20993e2 100644 --- a/api/test/videocodec_test_fixture.h +++ b/api/test/videocodec_test_fixture.h @@ -15,9 +15,9 @@ #include #include "api/test/videocodec_test_stats.h" +#include "api/video_codecs/h264_profile_level_id.h" #include "api/video_codecs/video_decoder_factory.h" #include "api/video_codecs/video_encoder_factory.h" -#include "media/base/h264_profile_level_id.h" #include "modules/video_coding/include/video_codec_interface.h" namespace webrtc { @@ -128,6 +128,14 @@ class VideoCodecTestFixture { // Name of the codec being tested. std::string codec_name; + // Encoder and decoder format and parameters. If provided, the format is used + // to instantiate the codec. If not provided, the test creates and uses the + // default `SdpVideoFormat` based on `codec_name`. + // The encoder and decoder name (`SdpVideoFormat::name`) should be the same as + // `codec_name`. + absl::optional<SdpVideoFormat> encoder_format; + absl::optional<SdpVideoFormat> decoder_format; + // H.264 specific settings.
struct H264CodecSettings { H264Profile profile = H264Profile::kProfileConstrainedBaseline; diff --git a/api/test/videocodec_test_stats.cc b/api/test/videocodec_test_stats.cc index b973dc2d12..f082b1e935 100644 --- a/api/test/videocodec_test_stats.cc +++ b/api/test/videocodec_test_stats.cc @@ -86,11 +86,18 @@ std::map<std::string, std::string> VideoCodecTestStats::VideoStatistics::ToMap() map["framerate_fps"] = std::to_string(framerate_fps); map["enc_speed_fps"] = std::to_string(enc_speed_fps); map["dec_speed_fps"] = std::to_string(dec_speed_fps); + map["avg_encode_latency_sec"] = std::to_string(avg_encode_latency_sec); + map["max_encode_latency_sec"] = std::to_string(max_encode_latency_sec); + map["avg_decode_latency_sec"] = std::to_string(avg_decode_latency_sec); + map["max_decode_latency_sec"] = std::to_string(max_decode_latency_sec); map["avg_delay_sec"] = std::to_string(avg_delay_sec); map["max_key_frame_delay_sec"] = std::to_string(max_key_frame_delay_sec); map["max_delta_frame_delay_sec"] = std::to_string(max_delta_frame_delay_sec); map["time_to_reach_target_bitrate_sec"] = std::to_string(time_to_reach_target_bitrate_sec); + map["avg_bitrate_mismatch_pct"] = std::to_string(avg_bitrate_mismatch_pct); + map["avg_framerate_mismatch_pct"] = + std::to_string(avg_framerate_mismatch_pct); map["avg_key_frame_size_bytes"] = std::to_string(avg_key_frame_size_bytes); map["avg_delta_frame_size_bytes"] = std::to_string(avg_delta_frame_size_bytes); diff --git a/api/test/videocodec_test_stats.h b/api/test/videocodec_test_stats.h index 02a18a71d9..a05985a665 100644 --- a/api/test/videocodec_test_stats.h +++ b/api/test/videocodec_test_stats.h @@ -101,10 +101,17 @@ class VideoCodecTestStats { float enc_speed_fps = 0.0f; float dec_speed_fps = 0.0f; + float avg_encode_latency_sec = 0.0f; + float max_encode_latency_sec = 0.0f; + float avg_decode_latency_sec = 0.0f; + float max_decode_latency_sec = 0.0f; + float avg_delay_sec = 0.0f; float max_key_frame_delay_sec = 0.0f; float max_delta_frame_delay_sec = 0.0f; float time_to_reach_target_bitrate_sec = 0.0f; + float avg_bitrate_mismatch_pct = 0.0f; + float avg_framerate_mismatch_pct = 0.0f; float avg_key_frame_size_bytes = 0.0f; float avg_delta_frame_size_bytes = 0.0f; diff --git a/api/transport/bitrate_settings.h b/api/transport/bitrate_settings.h index b6c022dd11..562309a46c 100644 --- a/api/transport/bitrate_settings.h +++ b/api/transport/bitrate_settings.h @@ -18,7 +18,7 @@ namespace webrtc { -// Configuration of send bitrate. The |start_bitrate_bps| value is +// Configuration of send bitrate. The `start_bitrate_bps` value is // used for multiple purposes, both as a prior in the bandwidth // estimator, and for initial configuration of the encoder. We may // want to create separate apis for those, and use a smaller struct diff --git a/api/transport/data_channel_transport_interface.h b/api/transport/data_channel_transport_interface.h index 2b2f5d2e6d..cdae6fee19 100644 --- a/api/transport/data_channel_transport_interface.h +++ b/api/transport/data_channel_transport_interface.h @@ -35,9 +35,6 @@ enum class DataMessageType { // sent reliably and in-order, even if the data channel is configured for // unreliable delivery. struct SendDataParams { - SendDataParams() = default; - SendDataParams(const SendDataParams&) = default; - DataMessageType type = DataMessageType::kText; // Whether to deliver the message in order with respect to other ordered @@ -48,14 +45,14 @@ // retransmitted by the transport before it is dropped.
// Setting this value to zero disables retransmission. // Valid values are in the range [0-UINT16_MAX]. - // |max_rtx_count| and |max_rtx_ms| may not be set simultaneously. + // `max_rtx_count` and `max_rtx_ms` may not be set simultaneously. absl::optional<int> max_rtx_count; // If set, the maximum number of milliseconds for which the transport // may retransmit this message before it is dropped. // Setting this value to zero disables retransmission. // Valid values are in the range [0-UINT16_MAX]. - // |max_rtx_count| and |max_rtx_ms| may not be set simultaneously. + // `max_rtx_count` and `max_rtx_ms` may not be set simultaneously. absl::optional<int> max_rtx_ms; }; @@ -96,18 +93,18 @@ class DataChannelTransportInterface { public: virtual ~DataChannelTransportInterface() = default; - // Opens a data |channel_id| for sending. May return an error if the - // specified |channel_id| is unusable. Must be called before |SendData|. + // Opens a data `channel_id` for sending. May return an error if the + // specified `channel_id` is unusable. Must be called before `SendData`. virtual RTCError OpenChannel(int channel_id) = 0; // Sends a data buffer to the remote endpoint using the given send parameters. - // |buffer| may not be larger than 256 KiB. Returns an error if the send + // `buffer` may not be larger than 256 KiB. Returns an error if the send // fails. virtual RTCError SendData(int channel_id, const SendDataParams& params, const rtc::CopyOnWriteBuffer& buffer) = 0; - // Closes |channel_id| gracefully. Returns an error if |channel_id| is not + // Closes `channel_id` gracefully. Returns an error if `channel_id` is not // open. Data sent after the closing procedure begins will not be // transmitted. The channel becomes closed after pending data is transmitted. virtual RTCError CloseChannel(int channel_id) = 0;
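A sketch of filling in these parameters for a partially reliable, unordered message (`transport` and `buffer` are assumed to exist; values are illustrative):

SendDataParams params;
params.type = DataMessageType::kBinary;
params.ordered = false;
params.max_rtx_count = 2;  // At most two retransmissions; leave max_rtx_ms
                           // unset, since the two may not be combined.
RTCError result = transport->SendData(/*channel_id=*/1, params, buffer);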
diff --git a/api/transport/enums.h b/api/transport/enums.h index eb33e919a9..3bc8fd1529 100644 --- a/api/transport/enums.h +++ b/api/transport/enums.h @@ -34,6 +34,16 @@ enum PortPrunePolicy { // on the same network. }; +enum class VpnPreference { + kDefault, // No VPN preference. + kOnlyUseVpn, // Only use VPN connections. + kNeverUseVpn, // Never use VPN connections. + kPreferVpn, // Use a VPN connection if possible, + // i.e. VPN connections sort first. + kAvoidVpn, // Only use VPN if there are no other connections, + // i.e. VPN connections sort last. +}; + } // namespace webrtc #endif // API_TRANSPORT_ENUMS_H_ diff --git a/api/transport/network_types.cc b/api/transport/network_types.cc index 7451940151..d6495ce490 100644 --- a/api/transport/network_types.cc +++ b/api/transport/network_types.cc @@ -103,8 +103,4 @@ bool PacedPacketInfo::operator==(const PacedPacketInfo& rhs) const { probe_cluster_min_bytes == rhs.probe_cluster_min_bytes; } -ProcessInterval::ProcessInterval() = default; -ProcessInterval::ProcessInterval(const ProcessInterval&) = default; -ProcessInterval::~ProcessInterval() = default; - } // namespace webrtc diff --git a/api/transport/network_types.h b/api/transport/network_types.h index 4e96b0f12e..29a7cf7705 100644 --- a/api/transport/network_types.h +++ b/api/transport/network_types.h @@ -241,9 +241,6 @@ struct NetworkControlUpdate { // Process control struct ProcessInterval { - ProcessInterval(); - ProcessInterval(const ProcessInterval&); - ~ProcessInterval(); Timestamp at_time = Timestamp::PlusInfinity(); absl::optional<DataSize> pacer_queue; }; diff --git a/api/transport/rtp/BUILD.gn b/api/transport/rtp/BUILD.gn index 7b01169360..26036c7f32 100644 --- a/api/transport/rtp/BUILD.gn +++ b/api/transport/rtp/BUILD.gn @@ -24,7 +24,10 @@ rtc_source_set("dependency_descriptor") { "dependency_descriptor.cc", "dependency_descriptor.h", ] - deps = [ "../../../rtc_base:checks" ] + deps = [ + "../../../rtc_base:checks", + "../../video:render_resolution", + ] absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector", "//third_party/abseil-cpp/absl/strings", diff --git a/api/transport/rtp/dependency_descriptor.cc b/api/transport/rtp/dependency_descriptor.cc index 2a9b6d9a71..e784853cd6 100644 --- a/api/transport/rtp/dependency_descriptor.cc +++ b/api/transport/rtp/dependency_descriptor.cc @@ -43,7 +43,7 @@ absl::InlinedVector StringToDecodeTargetIndications( indication = DecodeTargetIndication::kSwitch; break; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } dtis.push_back(indication); } diff --git a/api/transport/rtp/dependency_descriptor.h b/api/transport/rtp/dependency_descriptor.h index 6967c83517..0db600918e 100644 --- a/api/transport/rtp/dependency_descriptor.h +++ b/api/transport/rtp/dependency_descriptor.h @@ -20,30 +20,11 @@ #include "absl/container/inlined_vector.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" +#include "api/video/render_resolution.h" namespace webrtc { // Structures to build and parse dependency descriptor as described in // https://aomediacodec.github.io/av1-rtp-spec/#dependency-descriptor-rtp-header-extension -class RenderResolution { - public: - constexpr RenderResolution() = default; - constexpr RenderResolution(int width, int height) - : width_(width), height_(height) {} - RenderResolution(const RenderResolution&) = default; - RenderResolution& operator=(const RenderResolution&) = default; - - friend bool operator==(const RenderResolution& lhs, - const RenderResolution& rhs) { - return lhs.width_ == rhs.width_ && lhs.height_ == rhs.height_; - } - - constexpr int Width() const { return width_; } - constexpr int Height() const { return height_; } - - private: - int width_ = 0; - int height_ = 0; -}; // Relationship of a frame to a Decode target.
enum class DecodeTargetIndication { diff --git a/api/transport/sctp_transport_factory_interface.h b/api/transport/sctp_transport_factory_interface.h index 912be3a374..4fc8af5bad 100644 --- a/api/transport/sctp_transport_factory_interface.h +++ b/api/transport/sctp_transport_factory_interface.h @@ -32,7 +32,7 @@ class SctpTransportFactoryInterface { public: virtual ~SctpTransportFactoryInterface() = default; - // Create an SCTP transport using |channel| for the underlying transport. + // Create an SCTP transport using `channel` for the underlying transport. virtual std::unique_ptr<cricket::SctpTransportInternal> CreateSctpTransport( rtc::PacketTransportInternal* channel) = 0; }; diff --git a/api/transport/stun.cc b/api/transport/stun.cc index 1b5bf0c409..87da0058d3 100644 --- a/api/transport/stun.cc +++ b/api/transport/stun.cc @@ -31,7 +31,6 @@ namespace cricket { namespace { const int k127Utf8CharactersLengthInBytes = 508; -const int kDefaultMaxAttributeLength = 508; const int kMessageIntegrityAttributeLength = 20; const int kTheoreticalMaximumAttributeLength = 65535; @@ -68,12 +67,6 @@ bool LengthValid(int type, int length) { case STUN_ATTR_SOFTWARE: return length <= k127Utf8CharactersLengthInBytes; // RFC 8489 section 14.14 - case STUN_ATTR_ORIGIN: - // 0x802F is unassigned by IANA. - // RESPONSE-ORIGIN is defined in RFC 5780 section 7.3, but does not - // specify a maximum length. It's an URL, so return an arbitrary - // restriction. - return length <= kDefaultMaxAttributeLength; case STUN_ATTR_DATA: // No length restriction in RFC; it's the content of a UDP datagram, // which in theory can be up to 65,535 bytes. @@ -83,7 +76,7 @@ bool LengthValid(int type, int length) { // Return an arbitrary restriction for all other types. return length <= kTheoreticalMaximumAttributeLength; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return true; } @@ -94,7 +87,6 @@ const char STUN_ERROR_REASON_BAD_REQUEST[] = "Bad Request"; const char STUN_ERROR_REASON_UNAUTHORIZED[] = "Unauthorized"; const char STUN_ERROR_REASON_UNKNOWN_ATTRIBUTE[] = "Unknown Attribute"; const char STUN_ERROR_REASON_FORBIDDEN[] = "Forbidden"; -const char STUN_ERROR_REASON_STALE_CREDENTIALS[] = "Stale Credentials"; const char STUN_ERROR_REASON_ALLOCATION_MISMATCH[] = "Allocation Mismatch"; const char STUN_ERROR_REASON_STALE_NONCE[] = "Stale Nonce"; const char STUN_ERROR_REASON_WRONG_CREDENTIALS[] = "Wrong Credentials"; @@ -621,8 +613,6 @@ StunAttributeValueType StunMessage::GetAttributeValueType(int type) const { return STUN_VALUE_ADDRESS; case STUN_ATTR_FINGERPRINT: return STUN_VALUE_UINT32; - case STUN_ATTR_ORIGIN: - return STUN_VALUE_BYTE_STRING; case STUN_ATTR_RETRANSMIT_COUNT: return STUN_VALUE_UINT32; case STUN_ATTR_GOOG_LAST_ICE_CHECK_RECEIVED: diff --git a/api/transport/stun.h b/api/transport/stun.h index 682a17a945..766b9ec368 100644 --- a/api/transport/stun.h +++ b/api/transport/stun.h @@ -62,7 +62,6 @@ enum StunAttributeType { STUN_ATTR_SOFTWARE = 0x8022, // ByteString STUN_ATTR_ALTERNATE_SERVER = 0x8023, // Address STUN_ATTR_FINGERPRINT = 0x8028, // UInt32 - STUN_ATTR_ORIGIN = 0x802F, // ByteString STUN_ATTR_RETRANSMIT_COUNT = 0xFF00 // UInt32 }; @@ -95,7 +94,6 @@ enum StunErrorCode { STUN_ERROR_BAD_REQUEST = 400, STUN_ERROR_UNAUTHORIZED = 401, STUN_ERROR_UNKNOWN_ATTRIBUTE = 420, - STUN_ERROR_STALE_CREDENTIALS = 430, // GICE only STUN_ERROR_STALE_NONCE = 438, STUN_ERROR_SERVER_ERROR = 500, STUN_ERROR_GLOBAL_FAILURE = 600 @@ -106,7 +104,6 @@ extern const char STUN_ERROR_REASON_TRY_ALTERNATE_SERVER[]; extern const char
STUN_ERROR_REASON_BAD_REQUEST[]; extern const char STUN_ERROR_REASON_UNAUTHORIZED[]; extern const char STUN_ERROR_REASON_UNKNOWN_ATTRIBUTE[]; -extern const char STUN_ERROR_REASON_STALE_CREDENTIALS[]; extern const char STUN_ERROR_REASON_STALE_NONCE[]; extern const char STUN_ERROR_REASON_SERVER_ERROR[]; @@ -254,11 +251,11 @@ class StunMessage { // This is used for testing. void SetStunMagicCookie(uint32_t val); - // Contruct a copy of |this|. + // Construct a copy of `this`. std::unique_ptr<StunMessage> Clone() const; - // Check if the attributes of this StunMessage equals those of |other| - // for all attributes that |attribute_type_mask| return true + // Check if the attributes of this StunMessage equal those of `other` + // for all attributes for which `attribute_type_mask` returns true. bool EqualAttributes(const StunMessage* other, std::function<bool(int type)> attribute_type_mask) const; @@ -570,11 +567,11 @@ class StunUInt16ListAttribute : public StunAttribute { std::string StunMethodToString(int msg_type); // Returns the (successful) response type for the given request type. -// Returns -1 if |request_type| is not a valid request type. +// Returns -1 if `request_type` is not a valid request type. int GetStunSuccessResponseType(int request_type); // Returns the error response type for the given request type. -// Returns -1 if |request_type| is not a valid request type. +// Returns -1 if `request_type` is not a valid request type. int GetStunErrorResponseType(int request_type); // Returns whether a given message is a request type. @@ -595,13 +592,13 @@ bool ComputeStunCredentialHash(const std::string& username, const std::string& password, std::string* hash); -// Make a copy af |attribute| and return a new StunAttribute. +// Make a copy of `attribute` and return a new StunAttribute. // This is useful if you don't care about what kind of attribute you // are handling. // // The implementation copies by calling Write() followed by Read(). // -// If |tmp_buffer| is supplied this buffer will be used, otherwise +// If `tmp_buffer` is supplied this buffer will be used, otherwise // a buffer will be created in the method.
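A usage sketch of this helper, whose declaration follows immediately below (`original_attribute` is an assumed, pre-existing attribute object):

    // Sketch: deep-copy a STUN attribute without knowing its concrete subtype.
    rtc::ByteBufferWriter scratch;  // Optional scratch buffer, reused across copies.
    std::unique_ptr<cricket::StunAttribute> copy =
        cricket::CopyStunAttribute(original_attribute, &scratch);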
std::unique_ptr CopyStunAttribute( const StunAttribute& attribute, diff --git a/api/transport/stun_unittest.cc b/api/transport/stun_unittest.cc index bf791f257d..e180703817 100644 --- a/api/transport/stun_unittest.cc +++ b/api/transport/stun_unittest.cc @@ -199,20 +199,6 @@ static const unsigned char kStunMessageWithErrorAttribute[] = { 0x69, 0x7a, 0x65, 0x64 }; -static const unsigned char kStunMessageWithOriginAttribute[] = { - 0x00, 0x01, 0x00, 0x18, // message header (binding request), length 24 - 0x21, 0x12, 0xA4, 0x42, // magic cookie - 0x29, 0x1f, 0xcd, 0x7c, // transaction id - 0xba, 0x58, 0xab, 0xd7, - 0xf2, 0x41, 0x01, 0x00, - 0x80, 0x2f, 0x00, 0x12, // origin attribute (length 18) - 0x68, 0x74, 0x74, 0x70, // http://example.com - 0x3A, 0x2F, 0x2F, 0x65, - 0x78, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6d, 0x00, 0x00, -}; - // Sample messages with an invalid length Field // The actual length in bytes of the invalid messages (including STUN header) @@ -551,7 +537,6 @@ const in_addr kIPv4TestAddress1 = {{{0x0ac, 0x017, 0x044, 0x0e6}}}; const char kTestUserName1[] = "abcdefgh"; const char kTestUserName2[] = "abc"; const char kTestErrorReason[] = "Unauthorized"; -const char kTestOrigin[] = "http://example.com"; const int kTestErrorClass = 4; const int kTestErrorNumber = 1; const int kTestErrorCode = 401; @@ -1097,15 +1082,6 @@ TEST_F(StunTest, ReadMessageWithAnUnknownAttribute) { EXPECT_EQ(kTestUserName2, username->GetString()); } -TEST_F(StunTest, ReadMessageWithOriginAttribute) { - StunMessage msg; - size_t size = ReadStunMessage(&msg, kStunMessageWithOriginAttribute); - CheckStunHeader(msg, STUN_BINDING_REQUEST, size); - const StunByteStringAttribute* origin = msg.GetByteString(STUN_ATTR_ORIGIN); - ASSERT_TRUE(origin != NULL); - EXPECT_EQ(kTestOrigin, origin->GetString()); -} - TEST_F(StunTest, WriteMessageWithAnErrorCodeAttribute) { StunMessage msg; size_t size = sizeof(kStunMessageWithErrorAttribute); @@ -1152,25 +1128,6 @@ TEST_F(StunTest, WriteMessageWithAUInt16ListAttribute) { memcmp(out.Data(), kStunMessageWithUInt16ListAttribute, size - 2)); } -TEST_F(StunTest, WriteMessageWithOriginAttribute) { - StunMessage msg; - size_t size = sizeof(kStunMessageWithOriginAttribute); - - msg.SetType(STUN_BINDING_REQUEST); - msg.SetTransactionID( - std::string(reinterpret_cast(kTestTransactionId1), - kStunTransactionIdLength)); - auto origin = - std::make_unique(STUN_ATTR_ORIGIN, kTestOrigin); - msg.AddAttribute(std::move(origin)); - - rtc::ByteBufferWriter out; - EXPECT_TRUE(msg.Write(&out)); - ASSERT_EQ(size, out.Length()); - // Check everything up to the padding - ASSERT_EQ(0, memcmp(out.Data(), kStunMessageWithOriginAttribute, size - 2)); -} - // Test that we fail to read messages with invalid lengths. void CheckFailureToRead(const unsigned char* testcase, size_t length) { StunMessage msg; diff --git a/api/turn_customizer.h b/api/turn_customizer.h index f0bf0d9a35..50e406516e 100644 --- a/api/turn_customizer.h +++ b/api/turn_customizer.h @@ -29,7 +29,7 @@ class TurnCustomizer { cricket::StunMessage* message) = 0; // TURN can send data using channel data messages or Send indication. - // This method should return false if |data| should be sent using + // This method should return false if `data` should be sent using // a Send indication instead of a ChannelData message, even if a // channel is bound. 
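For reference, a sketch of a trivial customizer implementing this interface (the method signatures are those declared in this header; the no-op bodies are illustrative, not a recommended policy):

    // Sketch: leave STUN messages untouched and always allow ChannelData.
    class PassthroughTurnCustomizer : public webrtc::TurnCustomizer {
     public:
      void MaybeModifyOutgoingStunMessage(cricket::PortInterface* port,
                                          cricket::StunMessage* message) override {
        // Attributes could be added or modified here before sending.
      }
      bool AllowChannelData(cricket::PortInterface* port,
                            const void* data,
                            size_t size,
                            bool payload) override {
        return true;  // Use ChannelData messages whenever a channel is bound.
      }
    };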
virtual bool AllowChannelData(cricket::PortInterface* port, diff --git a/api/video/BUILD.gn b/api/video/BUILD.gn index ec90bc137e..9fd28914b2 100644 --- a/api/video/BUILD.gn +++ b/api/video/BUILD.gn @@ -43,6 +43,8 @@ rtc_library("video_frame") { sources = [ "i420_buffer.cc", "i420_buffer.h", + "i444_buffer.cc", + "i444_buffer.h", "nv12_buffer.cc", "nv12_buffer.h", "video_codec_type.h", @@ -60,6 +62,7 @@ "..:array_view", "..:rtp_packet_info", "..:scoped_refptr", + "..:video_track_source_constraints", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", "../../rtc_base/memory:aligned_malloc", @@ -71,7 +74,7 @@ if (is_android) { java_cpp_enum("video_frame_enums") { - sources = [ "video_codec_type.h" ] + sources = [ "video_frame_buffer.h" ] } } @@ -112,6 +115,11 @@ rtc_source_set("video_frame_type") { sources = [ "video_frame_type.h" ] } +rtc_source_set("render_resolution") { + visibility = [ "*" ] + public = [ "render_resolution.h" ] +} + rtc_library("encoded_image") { visibility = [ "*" ] sources = [ @@ -154,6 +162,7 @@ rtc_library("rtp_video_frame_assembler") { ":encoded_frame", "../../modules/rtp_rtcp:rtp_rtcp", "../../modules/rtp_rtcp:rtp_rtcp_format", + "../../modules/video_coding:packet_buffer", "../../modules/video_coding:video_coding", "../../rtc_base:logging", ] diff --git a/api/video/DEPS b/api/video/DEPS index cf6770dce0..5a3e496bcf 100644 --- a/api/video/DEPS +++ b/api/video/DEPS @@ -18,6 +18,10 @@ specific_include_rules = { "+rtc_base/memory/aligned_malloc.h", ], + "i444_buffer\.h": [ + "+rtc_base/memory/aligned_malloc.h", + ], + "nv12_buffer\.h": [ "+rtc_base/memory/aligned_malloc.h", ], diff --git a/api/video/color_space.cc b/api/video/color_space.cc index 710bb432ab..a0cd32edb2 100644 --- a/api/video/color_space.cc +++ b/api/video/color_space.cc @@ -12,7 +12,7 @@ namespace webrtc { namespace { -// Try to convert |enum_value| into the enum class T. |enum_bitmask| is created +// Try to convert `enum_value` into the enum class T. `enum_bitmask` is created // by the function below. Returns true if conversion was successful, false // otherwise. template <typename T> @@ -43,7 +43,7 @@ constexpr int MakeMask(const int index, const int length, T (&values)[N]) { } // Create a bitmask where each bit corresponds to one potential enum value. -// |values| should be an array listing all possible enum values. The bit is set +// `values` should be an array listing all possible enum values. The bit is set // to one if the corresponding enum exists. Only works for enums with values // less than 64. template <typename T, size_t N> diff --git a/api/video/encoded_frame.cc b/api/video/encoded_frame.cc index 42d6b06b84..86d1a698a7 100644 --- a/api/video/encoded_frame.cc +++ b/api/video/encoded_frame.cc @@ -13,7 +13,7 @@ namespace webrtc { bool EncodedFrame::delayed_by_retransmission() const { - return 0; + return false; } } // namespace webrtc diff --git a/api/video/encoded_frame.h b/api/video/encoded_frame.h index 5f046327fa..3ef26caf6e 100644 --- a/api/video/encoded_frame.h +++ b/api/video/encoded_frame.h @@ -46,7 +46,7 @@ class EncodedFrame : public webrtc::VCMEncodedFrame { int64_t Id() const { return id_; } // TODO(philipel): Add simple modify/access functions to prevent adding too - // many |references|. + // many `references`.
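The encoded_image.h hunk below introduces an at-target-quality flag on EncodedImage. A sketch of how an encoder wrapper might set it (the QP threshold is a made-up example, not part of this change):

    // Sketch: flag an encoded image as "good enough" based on its QP.
    void MarkTargetQuality(webrtc::EncodedImage& image, int qp) {
      constexpr int kTargetQualityQp = 24;  // Hypothetical threshold.
      image.SetAtTargetQuality(qp <= kTargetQualityQp);
    }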
size_t num_references = 0; int64_t references[kMaxFrameReferences]; // Is this subframe the last one in the superframe (In RTP stream that would diff --git a/api/video/encoded_image.h b/api/video/encoded_image.h index dae4e3a60a..88df34916b 100644 --- a/api/video/encoded_image.h +++ b/api/video/encoded_image.h @@ -98,7 +98,7 @@ class RTC_EXPORT EncodedImage { } // These methods can be used to set/get size of subframe with spatial index - // |spatial_index| on encoded frames that consist of multiple spatial layers. + // `spatial_index` on encoded frames that consist of multiple spatial layers. absl::optional<size_t> SpatialLayerFrameSize(int spatial_index) const; void SetSpatialLayerFrameSize(int spatial_index, size_t size_bytes); @@ -154,6 +154,16 @@ class RTC_EXPORT EncodedImage { return encoded_data_ ? encoded_data_->data() : nullptr; } + // Returns whether the encoded image can be considered to be of target + // quality. + bool IsAtTargetQuality() const { return at_target_quality_; } + + // Sets whether the encoded image should be considered to be of target + // quality. + void SetAtTargetQuality(bool at_target_quality) { + at_target_quality_ = at_target_quality; + } + uint32_t _encodedWidth = 0; uint32_t _encodedHeight = 0; // NTP time of the capture time in local timebase in milliseconds. @@ -195,11 +205,13 @@ class RTC_EXPORT EncodedImage { // carries the webrtc::VideoFrame id field from the sender to the receiver. absl::optional<uint8_t> video_frame_tracking_id_; // Information about packets used to assemble this video frame. This is needed - // by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's + // by `SourceTracker` when the frame is delivered to the RTCRtpReceiver's // MediaStreamTrack, in order to implement getContributingSources(). See: // https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources RtpPacketInfos packet_infos_; bool retransmission_allowed_ = true; + // True if the encoded image can be considered to be of target quality. + bool at_target_quality_ = false; }; } // namespace webrtc diff --git a/api/video/i010_buffer.cc b/api/video/i010_buffer.cc index 74d37d1b57..b98e586562 100644 --- a/api/video/i010_buffer.cc +++ b/api/video/i010_buffer.cc @@ -117,7 +117,7 @@ rtc::scoped_refptr<I010Buffer> I010Buffer::Rotate( switch (rotation) { // This case is covered by the early return. case webrtc::kVideoRotation_0: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; case webrtc::kVideoRotation_90: dest_x = src.height() - y - 1; @@ -232,35 +232,4 @@ void I010Buffer::ScaleFrom(const I010BufferInterface& src) { CropAndScaleFrom(src, 0, 0, src.width(), src.height()); } -void I010Buffer::PasteFrom(const I010BufferInterface& picture, - int offset_col, - int offset_row) { - RTC_CHECK_LE(picture.width() + offset_col, width()); - RTC_CHECK_LE(picture.height() + offset_row, height()); - RTC_CHECK_GE(offset_col, 0); - RTC_CHECK_GE(offset_row, 0); - - // Pasted picture has to be aligned so subsumpled UV plane isn't corrupted.
- RTC_CHECK(offset_col % 2 == 0); - RTC_CHECK(offset_row % 2 == 0); - RTC_CHECK(picture.width() % 2 == 0 || - picture.width() + offset_col == width()); - RTC_CHECK(picture.height() % 2 == 0 || - picture.height() + offset_row == height()); - - libyuv::CopyPlane_16(picture.DataY(), picture.StrideY(), - MutableDataY() + StrideY() * offset_row + offset_col, - StrideY(), picture.width(), picture.height()); - - libyuv::CopyPlane_16( - picture.DataU(), picture.StrideU(), - MutableDataU() + StrideU() * offset_row / 2 + offset_col / 2, StrideU(), - picture.width() / 2, picture.height() / 2); - - libyuv::CopyPlane_16( - picture.DataV(), picture.StrideV(), - MutableDataV() + StrideV() * offset_row / 2 + offset_col / 2, StrideV(), - picture.width() / 2, picture.height() / 2); -} - } // namespace webrtc diff --git a/api/video/i010_buffer.h b/api/video/i010_buffer.h index 6299927231..11e0879fec 100644 --- a/api/video/i010_buffer.h +++ b/api/video/i010_buffer.h @@ -34,7 +34,7 @@ class I010Buffer : public I010BufferInterface { // Convert and put I420 buffer into a new buffer. static rtc::scoped_refptr Copy(const I420BufferInterface& buffer); - // Return a rotated copy of |src|. + // Return a rotated copy of `src`. static rtc::scoped_refptr Rotate(const I010BufferInterface& src, VideoRotation rotation); @@ -55,23 +55,17 @@ class I010Buffer : public I010BufferInterface { uint16_t* MutableDataU(); uint16_t* MutableDataV(); - // Scale the cropped area of |src| to the size of |this| buffer, and - // write the result into |this|. + // Scale the cropped area of `src` to the size of `this` buffer, and + // write the result into `this`. void CropAndScaleFrom(const I010BufferInterface& src, int offset_x, int offset_y, int crop_width, int crop_height); - // Scale all of |src| to the size of |this| buffer, with no cropping. + // Scale all of `src` to the size of `this` buffer, with no cropping. void ScaleFrom(const I010BufferInterface& src); - // Pastes whole picture to canvas at (offset_row, offset_col). - // Offsets and picture dimensions must be even. - void PasteFrom(const I010BufferInterface& picture, - int offset_col, - int offset_row); - protected: I010Buffer(int width, int height, int stride_y, int stride_u, int stride_v); ~I010Buffer() override; diff --git a/api/video/i420_buffer.cc b/api/video/i420_buffer.cc index 8783a4a313..deecf1d71d 100644 --- a/api/video/i420_buffer.cc +++ b/api/video/i420_buffer.cc @@ -229,35 +229,4 @@ void I420Buffer::ScaleFrom(const I420BufferInterface& src) { CropAndScaleFrom(src, 0, 0, src.width(), src.height()); } -void I420Buffer::PasteFrom(const I420BufferInterface& picture, - int offset_col, - int offset_row) { - RTC_CHECK_LE(picture.width() + offset_col, width()); - RTC_CHECK_LE(picture.height() + offset_row, height()); - RTC_CHECK_GE(offset_col, 0); - RTC_CHECK_GE(offset_row, 0); - - // Pasted picture has to be aligned so subsumpled UV plane isn't corrupted. 
- RTC_CHECK(offset_col % 2 == 0); - RTC_CHECK(offset_row % 2 == 0); - RTC_CHECK(picture.width() % 2 == 0 || - picture.width() + offset_col == width()); - RTC_CHECK(picture.height() % 2 == 0 || - picture.height() + offset_row == height()); - - libyuv::CopyPlane(picture.DataY(), picture.StrideY(), - MutableDataY() + StrideY() * offset_row + offset_col, - StrideY(), picture.width(), picture.height()); - - libyuv::CopyPlane( - picture.DataU(), picture.StrideU(), - MutableDataU() + StrideU() * offset_row / 2 + offset_col / 2, StrideU(), - picture.width() / 2, picture.height() / 2); - - libyuv::CopyPlane( - picture.DataV(), picture.StrideV(), - MutableDataV() + StrideV() * offset_row / 2 + offset_col / 2, StrideV(), - picture.width() / 2, picture.height() / 2); -} - } // namespace webrtc diff --git a/api/video/i420_buffer.h b/api/video/i420_buffer.h index 251eb93d30..af52c64fb4 100644 --- a/api/video/i420_buffer.h +++ b/api/video/i420_buffer.h @@ -49,7 +49,7 @@ class RTC_EXPORT I420Buffer : public I420BufferInterface { const uint8_t* data_v, int stride_v); - // Returns a rotated copy of |src|. + // Returns a rotated copy of `src`. static rtc::scoped_refptr<I420Buffer> Rotate(const I420BufferInterface& src, VideoRotation rotation); // Deprecated. @@ -83,8 +83,8 @@ class RTC_EXPORT I420Buffer : public I420BufferInterface { uint8_t* MutableDataU(); uint8_t* MutableDataV(); - // Scale the cropped area of |src| to the size of |this| buffer, and - // write the result into |this|. + // Scale the cropped area of `src` to the size of `this` buffer, and + // write the result into `this`. void CropAndScaleFrom(const I420BufferInterface& src, int offset_x, int offset_y, @@ -95,15 +95,9 @@ class RTC_EXPORT I420Buffer : public I420BufferInterface { // aspect ratio without distorting the image. void CropAndScaleFrom(const I420BufferInterface& src); - // Scale all of |src| to the size of |this| buffer, with no cropping. + // Scale all of `src` to the size of `this` buffer, with no cropping. void ScaleFrom(const I420BufferInterface& src); - // Pastes whole picture to canvas at (offset_row, offset_col). - // Offsets and picture dimensions must be even. - void PasteFrom(const I420BufferInterface& picture, - int offset_col, - int offset_row); - protected: I420Buffer(int width, int height); I420Buffer(int width, int height, int stride_y, int stride_u, int stride_v); diff --git a/api/video/i444_buffer.cc b/api/video/i444_buffer.cc new file mode 100644 index 0000000000..8bf9f76625 --- /dev/null +++ b/api/video/i444_buffer.cc @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "api/video/i444_buffer.h" + +#include <string.h> + +#include <algorithm> +#include <utility> + +#include "api/video/i420_buffer.h" +#include "rtc_base/checks.h" +#include "rtc_base/ref_counted_object.h" +#include "third_party/libyuv/include/libyuv/convert.h" +#include "third_party/libyuv/include/libyuv/planar_functions.h" +#include "third_party/libyuv/include/libyuv/scale.h" + +// Aligning pointer to 64 bytes for improved performance, e.g. use SIMD.
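Before the implementation that follows, a short usage sketch of the new buffer type, using only the Create(), InitializeData() and ToI420() APIs declared in i444_buffer.h further down in this diff:

    // Sketch: build a 4:4:4 buffer and convert it for a 4:2:0-only consumer.
    rtc::scoped_refptr<webrtc::I444Buffer> i444 =
        webrtc::I444Buffer::Create(/*width=*/640, /*height=*/480);
    i444->InitializeData();  // Zero all three full-resolution planes.
    rtc::scoped_refptr<webrtc::I420BufferInterface> i420 = i444->ToI420();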
+static const int kBufferAlignment = 64; + +namespace webrtc { + +namespace { + +int I444DataSize(int height, int stride_y, int stride_u, int stride_v) { + return stride_y * height + stride_u * height + stride_v * height; +} + +} // namespace + +I444Buffer::I444Buffer(int width, int height) + : I444Buffer(width, height, width, width, width) {} + +I444Buffer::I444Buffer(int width, + int height, + int stride_y, + int stride_u, + int stride_v) + : width_(width), + height_(height), + stride_y_(stride_y), + stride_u_(stride_u), + stride_v_(stride_v), + data_(static_cast<uint8_t*>( + AlignedMalloc(I444DataSize(height, stride_y, stride_u, stride_v), + kBufferAlignment))) { + RTC_DCHECK_GT(width, 0); + RTC_DCHECK_GT(height, 0); + RTC_DCHECK_GE(stride_y, width); + RTC_DCHECK_GE(stride_u, width); + RTC_DCHECK_GE(stride_v, width); +} + +I444Buffer::~I444Buffer() {} + +// static +rtc::scoped_refptr<I444Buffer> I444Buffer::Create(int width, int height) { + return rtc::make_ref_counted<I444Buffer>(width, height); +} + +// static +rtc::scoped_refptr<I444Buffer> I444Buffer::Create(int width, + int height, + int stride_y, + int stride_u, + int stride_v) { + return rtc::make_ref_counted<I444Buffer>(width, height, stride_y, stride_u, + stride_v); +} + +// static +rtc::scoped_refptr<I444Buffer> I444Buffer::Copy( + const I444BufferInterface& source) { + return Copy(source.width(), source.height(), source.DataY(), source.StrideY(), + source.DataU(), source.StrideU(), source.DataV(), + source.StrideV()); +} + +// static +rtc::scoped_refptr<I444Buffer> I444Buffer::Copy(int width, + int height, + const uint8_t* data_y, + int stride_y, + const uint8_t* data_u, + int stride_u, + const uint8_t* data_v, + int stride_v) { + // Note: May use different strides than the input data. + rtc::scoped_refptr<I444Buffer> buffer = Create(width, height); + RTC_CHECK_EQ(0, libyuv::I444Copy(data_y, stride_y, data_u, stride_u, data_v, + stride_v, buffer->MutableDataY(), + buffer->StrideY(), buffer->MutableDataU(), + buffer->StrideU(), buffer->MutableDataV(), + buffer->StrideV(), width, height)); + return buffer; +} + +// static +rtc::scoped_refptr<I444Buffer> I444Buffer::Rotate( + const I444BufferInterface& src, + VideoRotation rotation) { + RTC_CHECK(src.DataY()); + RTC_CHECK(src.DataU()); + RTC_CHECK(src.DataV()); + + int rotated_width = src.width(); + int rotated_height = src.height(); + if (rotation == webrtc::kVideoRotation_90 || + rotation == webrtc::kVideoRotation_270) { + std::swap(rotated_width, rotated_height); + } + + rtc::scoped_refptr<I444Buffer> buffer = + I444Buffer::Create(rotated_width, rotated_height); + + RTC_CHECK_EQ(0, + libyuv::I444Rotate( + src.DataY(), src.StrideY(), src.DataU(), src.StrideU(), + src.DataV(), src.StrideV(), buffer->MutableDataY(), + buffer->StrideY(), buffer->MutableDataU(), buffer->StrideU(), + buffer->MutableDataV(), buffer->StrideV(), src.width(), + src.height(), static_cast<libyuv::RotationMode>(rotation))); + + return buffer; +} + +rtc::scoped_refptr<I420BufferInterface> I444Buffer::ToI420() { + rtc::scoped_refptr<I420Buffer> i420_buffer = + I420Buffer::Create(width(), height()); + libyuv::I444ToI420(DataY(), StrideY(), DataU(), StrideU(), DataV(), StrideV(), + i420_buffer->MutableDataY(), i420_buffer->StrideY(), + i420_buffer->MutableDataU(), i420_buffer->StrideU(), + i420_buffer->MutableDataV(), i420_buffer->StrideV(), + width(), height()); + return i420_buffer; +} + +void I444Buffer::InitializeData() { + memset(data_.get(), 0, + I444DataSize(height_, stride_y_, stride_u_, stride_v_)); +} + +int I444Buffer::width() const { + return width_; +} + +int I444Buffer::height() const { + return height_; +} + +const uint8_t* I444Buffer::DataY() const { +
return data_.get(); +} +const uint8_t* I444Buffer::DataU() const { + return data_.get() + stride_y_ * height_; +} +const uint8_t* I444Buffer::DataV() const { + return data_.get() + stride_y_ * height_ + stride_u_ * height_; +} + +int I444Buffer::StrideY() const { + return stride_y_; +} +int I444Buffer::StrideU() const { + return stride_u_; +} +int I444Buffer::StrideV() const { + return stride_v_; +} + +uint8_t* I444Buffer::MutableDataY() { + return const_cast<uint8_t*>(DataY()); +} +uint8_t* I444Buffer::MutableDataU() { + return const_cast<uint8_t*>(DataU()); +} +uint8_t* I444Buffer::MutableDataV() { + return const_cast<uint8_t*>(DataV()); +} + +void I444Buffer::CropAndScaleFrom(const I444BufferInterface& src, + int offset_x, + int offset_y, + int crop_width, + int crop_height) { + RTC_CHECK_LE(crop_width, src.width()); + RTC_CHECK_LE(crop_height, src.height()); + RTC_CHECK_LE(crop_width + offset_x, src.width()); + RTC_CHECK_LE(crop_height + offset_y, src.height()); + RTC_CHECK_GE(offset_x, 0); + RTC_CHECK_GE(offset_y, 0); + + const uint8_t* y_plane = src.DataY() + src.StrideY() * offset_y + offset_x; + const uint8_t* u_plane = src.DataU() + src.StrideU() * offset_y + offset_x; + const uint8_t* v_plane = src.DataV() + src.StrideV() * offset_y + offset_x; + int res = + libyuv::I444Scale(y_plane, src.StrideY(), u_plane, src.StrideU(), v_plane, + src.StrideV(), crop_width, crop_height, MutableDataY(), + StrideY(), MutableDataU(), StrideU(), MutableDataV(), + StrideV(), width(), height(), libyuv::kFilterBox); + + RTC_DCHECK_EQ(res, 0); +} + +} // namespace webrtc diff --git a/api/video/i444_buffer.h b/api/video/i444_buffer.h new file mode 100644 index 0000000000..557bf4f3e0 --- /dev/null +++ b/api/video/i444_buffer.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_VIDEO_I444_BUFFER_H_ +#define API_VIDEO_I444_BUFFER_H_ + +#include <stdint.h> + +#include <memory> + +#include "api/scoped_refptr.h" +#include "api/video/video_frame_buffer.h" +#include "api/video/video_rotation.h" +#include "rtc_base/memory/aligned_malloc.h" +#include "rtc_base/system/rtc_export.h" + +namespace webrtc { + +// Plain I444 buffer in standard memory. +// I444 represents an image in YUV format without any chroma subsampling. +// https://en.wikipedia.org/wiki/Chroma_subsampling#4:4:4 +class RTC_EXPORT I444Buffer : public I444BufferInterface { + public: + static rtc::scoped_refptr<I444Buffer> Create(int width, int height); + static rtc::scoped_refptr<I444Buffer> Create(int width, + int height, + int stride_y, + int stride_u, + int stride_v); + + // Create a new buffer and copy the pixel data. + static rtc::scoped_refptr<I444Buffer> Copy(const I444BufferInterface& buffer); + + static rtc::scoped_refptr<I444Buffer> Copy(int width, + int height, + const uint8_t* data_y, + int stride_y, + const uint8_t* data_u, + int stride_u, + const uint8_t* data_v, + int stride_v); + + // Returns a rotated copy of `src`. + static rtc::scoped_refptr<I444Buffer> Rotate(const I444BufferInterface& src, + VideoRotation rotation); + + rtc::scoped_refptr<I420BufferInterface> ToI420() final; + const I420BufferInterface* GetI420() const final { return nullptr; } + + // Sets all three planes to all zeros.
Used to work around + // quirks in memory checkers + // (https://bugs.chromium.org/p/libyuv/issues/detail?id=377) and + // ffmpeg (http://crbug.com/390941). + // TODO(nisse): Deprecated. Should be deleted if/when those issues + // are resolved in a better way. Or in the mean time, use SetBlack. + void InitializeData(); + + int width() const override; + int height() const override; + const uint8_t* DataY() const override; + const uint8_t* DataU() const override; + const uint8_t* DataV() const override; + + int StrideY() const override; + int StrideU() const override; + int StrideV() const override; + + uint8_t* MutableDataY(); + uint8_t* MutableDataU(); + uint8_t* MutableDataV(); + + // Scale the cropped area of `src` to the size of `this` buffer, and + // write the result into `this`. + void CropAndScaleFrom(const I444BufferInterface& src, + int offset_x, + int offset_y, + int crop_width, + int crop_height); + + protected: + I444Buffer(int width, int height); + I444Buffer(int width, int height, int stride_y, int stride_u, int stride_v); + + ~I444Buffer() override; + + private: + const int width_; + const int height_; + const int stride_y_; + const int stride_u_; + const int stride_v_; + const std::unique_ptr<uint8_t, AlignedFreeDeleter> data_; +}; + +} // namespace webrtc + +#endif // API_VIDEO_I444_BUFFER_H_ diff --git a/api/video/nv12_buffer.h b/api/video/nv12_buffer.h index cb989e84b0..7baef2aeba 100644 --- a/api/video/nv12_buffer.h +++ b/api/video/nv12_buffer.h @@ -56,8 +56,8 @@ class RTC_EXPORT NV12Buffer : public NV12BufferInterface { // are resolved in a better way. Or in the mean time, use SetBlack. void InitializeData(); - // Scale the cropped area of |src| to the size of |this| buffer, and - // write the result into |this|. + // Scale the cropped area of `src` to the size of `this` buffer, and + // write the result into `this`. void CropAndScaleFrom(const NV12BufferInterface& src, int offset_x, int offset_y, diff --git a/api/video/render_resolution.h b/api/video/render_resolution.h new file mode 100644 index 0000000000..edcf8f8ee5 --- /dev/null +++ b/api/video/render_resolution.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree.
+ */ + +#ifndef API_VIDEO_RENDER_RESOLUTION_H_ +#define API_VIDEO_RENDER_RESOLUTION_H_ + +namespace webrtc { + +class RenderResolution { + public: + constexpr RenderResolution() = default; + constexpr RenderResolution(int width, int height) + : width_(width), height_(height) {} + RenderResolution(const RenderResolution&) = default; + RenderResolution& operator=(const RenderResolution&) = default; + + friend bool operator==(const RenderResolution& lhs, + const RenderResolution& rhs) { + return lhs.width_ == rhs.width_ && lhs.height_ == rhs.height_; + } + friend bool operator!=(const RenderResolution& lhs, + const RenderResolution& rhs) { + return !(lhs == rhs); + } + + constexpr bool Valid() const { return width_ > 0 && height_ > 0; } + + constexpr int Width() const { return width_; } + constexpr int Height() const { return height_; } + + private: + int width_ = 0; + int height_ = 0; +}; + +} // namespace webrtc + +#endif // API_VIDEO_RENDER_RESOLUTION_H_ diff --git a/api/video/rtp_video_frame_assembler.cc b/api/video/rtp_video_frame_assembler.cc index 8f3d04c30b..81c08389bb 100644 --- a/api/video/rtp_video_frame_assembler.cc +++ b/api/video/rtp_video_frame_assembler.cc @@ -51,7 +51,7 @@ std::unique_ptr CreateDepacketizer( case RtpVideoFrameAssembler::kGeneric: return std::make_unique(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return nullptr; } } // namespace @@ -92,6 +92,11 @@ RtpVideoFrameAssembler::Impl::Impl( RtpVideoFrameAssembler::FrameVector RtpVideoFrameAssembler::Impl::InsertPacket( const RtpPacketReceived& rtp_packet) { + if (rtp_packet.payload_size() == 0) { + ClearOldData(rtp_packet.SequenceNumber()); + return UpdateWithPadding(rtp_packet.SequenceNumber()); + } + absl::optional parsed_payload = depacketizer_->Parse(rtp_packet.PayloadBuffer()); @@ -99,11 +104,6 @@ RtpVideoFrameAssembler::FrameVector RtpVideoFrameAssembler::Impl::InsertPacket( return {}; } - if (parsed_payload->video_payload.size() == 0) { - ClearOldData(rtp_packet.SequenceNumber()); - return UpdateWithPadding(rtp_packet.SequenceNumber()); - } - if (rtp_packet.HasExtension()) { if (!ParseDependenciesDescriptorExtension(rtp_packet, parsed_payload->video_header)) { @@ -187,7 +187,10 @@ RtpVideoFrameAssembler::Impl::FindReferences(RtpFrameVector frames) { for (auto& frame : frames) { auto complete_frames = reference_finder_.ManageFrame(std::move(frame)); for (std::unique_ptr& complete_frame : complete_frames) { - res.push_back(std::move(complete_frame)); + uint16_t rtp_seq_num_start = complete_frame->first_seq_num(); + uint16_t rtp_seq_num_end = complete_frame->last_seq_num(); + res.emplace_back(rtp_seq_num_start, rtp_seq_num_end, + std::move(complete_frame)); } } return res; @@ -199,8 +202,12 @@ RtpVideoFrameAssembler::Impl::UpdateWithPadding(uint16_t seq_num) { FindReferences(AssembleFrames(packet_buffer_.InsertPadding(seq_num))); auto ref_finder_update = reference_finder_.PaddingReceived(seq_num); - res.insert(res.end(), std::make_move_iterator(ref_finder_update.begin()), - std::make_move_iterator(ref_finder_update.end())); + for (std::unique_ptr& complete_frame : ref_finder_update) { + uint16_t rtp_seq_num_start = complete_frame->first_seq_num(); + uint16_t rtp_seq_num_end = complete_frame->last_seq_num(); + res.emplace_back(rtp_seq_num_start, rtp_seq_num_end, + std::move(complete_frame)); + } return res; } diff --git a/api/video/rtp_video_frame_assembler.h b/api/video/rtp_video_frame_assembler.h index 353942bdc8..83162cb818 100644 --- a/api/video/rtp_video_frame_assembler.h +++ 
b/api/video/rtp_video_frame_assembler.h @@ -13,6 +13,7 @@ #include #include +#include #include "absl/container/inlined_vector.h" #include "api/video/encoded_frame.h" @@ -26,9 +27,31 @@ namespace webrtc { // monotonic in decode order, dependencies are expressed as frame IDs. class RtpVideoFrameAssembler { public: + // The RtpVideoFrameAssembler should return "RTP frames", but for now there + // is no good class for this purpose. For now return an EncodedFrame bundled + // with some minimal RTP information. + class AssembledFrame { + public: + AssembledFrame(uint16_t rtp_seq_num_start, + uint16_t rtp_seq_num_end, + std::unique_ptr<EncodedFrame> frame) + : rtp_seq_num_start_(rtp_seq_num_start), + rtp_seq_num_end_(rtp_seq_num_end), + frame_(std::move(frame)) {} + + uint16_t RtpSeqNumStart() const { return rtp_seq_num_start_; } + uint16_t RtpSeqNumEnd() const { return rtp_seq_num_end_; } + std::unique_ptr<EncodedFrame> ExtractFrame() { return std::move(frame_); } + + private: + uint16_t rtp_seq_num_start_; + uint16_t rtp_seq_num_end_; + std::unique_ptr<EncodedFrame> frame_; + }; + // FrameVector is just a vector-like type of std::unique_ptr<EncodedFrame>. // The vector type may change without notice. - using FrameVector = absl::InlinedVector<std::unique_ptr<EncodedFrame>, 3>; + using FrameVector = absl::InlinedVector<AssembledFrame, 3>; enum PayloadFormat { kRaw, kH264, kVp8, kVp9, kAv1, kGeneric }; explicit RtpVideoFrameAssembler(PayloadFormat payload_format); diff --git a/api/video/rtp_video_frame_assembler_unittests.cc b/api/video/rtp_video_frame_assembler_unittests.cc index 916a83cd73..82defb8399 100644 --- a/api/video/rtp_video_frame_assembler_unittests.cc +++ b/api/video/rtp_video_frame_assembler_unittests.cc @@ -93,7 +93,7 @@ class PacketBuilder { return kVideoCodecGeneric; } } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return absl::nullopt; } @@ -105,6 +105,13 @@ class PacketBuilder { RtpPacketToSend packet_to_send_; }; +RtpPacketReceived PaddingPacket(uint16_t seq_num) { + RtpPacketReceived padding_packet; + padding_packet.SetSequenceNumber(seq_num); + padding_packet.SetPadding(224); + return padding_packet; +} + void AppendFrames(RtpVideoFrameAssembler::FrameVector from, RtpVideoFrameAssembler::FrameVector& to) { to.insert(to.end(), std::make_move_iterator(from.begin()), @@ -155,13 +162,15 @@ TEST(RtpVideoFrameAssembler, Vp8Packetization) { ASSERT_THAT(frames, SizeIs(2)); - EXPECT_THAT(frames[0]->Id(), Eq(10)); - EXPECT_THAT(References(frames[0]), IsEmpty()); - EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kKeyframePayload)); + auto first_frame = frames[0].ExtractFrame(); + EXPECT_THAT(first_frame->Id(), Eq(10)); + EXPECT_THAT(References(first_frame), IsEmpty()); + EXPECT_THAT(Payload(first_frame), ElementsAreArray(kKeyframePayload)); - EXPECT_THAT(frames[1]->Id(), Eq(11)); - EXPECT_THAT(References(frames[1]), UnorderedElementsAre(10)); - EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kDeltaframePayload)); + auto second_frame = frames[1].ExtractFrame(); + EXPECT_THAT(second_frame->Id(), Eq(11)); + EXPECT_THAT(References(second_frame), UnorderedElementsAre(10)); + EXPECT_THAT(Payload(second_frame), ElementsAreArray(kDeltaframePayload)); } TEST(RtpVideoFrameAssembler, Vp9Packetization) { @@ -194,13 +203,15 @@ TEST(RtpVideoFrameAssembler, Vp9Packetization) { ASSERT_THAT(frames, SizeIs(2)); - EXPECT_THAT(frames[0]->Id(), Eq(10)); - EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); - EXPECT_THAT(References(frames[0]), IsEmpty()); + auto first_frame = frames[0].ExtractFrame(); + EXPECT_THAT(first_frame->Id(), Eq(10)); + EXPECT_THAT(Payload(first_frame),
ElementsAreArray(kPayload)); + EXPECT_THAT(References(first_frame), IsEmpty()); - EXPECT_THAT(frames[1]->Id(), Eq(11)); - EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); - EXPECT_THAT(References(frames[1]), UnorderedElementsAre(10)); + auto second_frame = frames[1].ExtractFrame(); + EXPECT_THAT(second_frame->Id(), Eq(11)); + EXPECT_THAT(Payload(second_frame), ElementsAreArray(kPayload)); + EXPECT_THAT(References(second_frame), UnorderedElementsAre(10)); } TEST(RtpVideoFrameAssembler, Av1Packetization) { @@ -232,13 +243,15 @@ TEST(RtpVideoFrameAssembler, Av1Packetization) { ASSERT_THAT(frames, SizeIs(2)); - EXPECT_THAT(frames[0]->Id(), Eq(20)); - EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kKeyframePayload)); - EXPECT_THAT(References(frames[0]), IsEmpty()); + auto first_frame = frames[0].ExtractFrame(); + EXPECT_THAT(first_frame->Id(), Eq(20)); + EXPECT_THAT(Payload(first_frame), ElementsAreArray(kKeyframePayload)); + EXPECT_THAT(References(first_frame), IsEmpty()); - EXPECT_THAT(frames[1]->Id(), Eq(21)); - EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kDeltaframePayload)); - EXPECT_THAT(References(frames[1]), UnorderedElementsAre(20)); + auto second_frame = frames[1].ExtractFrame(); + EXPECT_THAT(second_frame->Id(), Eq(21)); + EXPECT_THAT(Payload(second_frame), ElementsAreArray(kDeltaframePayload)); + EXPECT_THAT(References(second_frame), UnorderedElementsAre(20)); } TEST(RtpVideoFrameAssembler, RawPacketizationDependencyDescriptorExtension) { @@ -283,13 +296,15 @@ TEST(RtpVideoFrameAssembler, RawPacketizationDependencyDescriptorExtension) { ASSERT_THAT(frames, SizeIs(2)); - EXPECT_THAT(frames[0]->Id(), Eq(10)); - EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); - EXPECT_THAT(References(frames[0]), IsEmpty()); + auto first_frame = frames[0].ExtractFrame(); + EXPECT_THAT(first_frame->Id(), Eq(10)); + EXPECT_THAT(Payload(first_frame), ElementsAreArray(kPayload)); + EXPECT_THAT(References(first_frame), IsEmpty()); - EXPECT_THAT(frames[1]->Id(), Eq(20)); - EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); - EXPECT_THAT(References(frames[1]), UnorderedElementsAre(10)); + auto second_frame = frames[1].ExtractFrame(); + EXPECT_THAT(second_frame->Id(), Eq(20)); + EXPECT_THAT(Payload(second_frame), ElementsAreArray(kPayload)); + EXPECT_THAT(References(second_frame), UnorderedElementsAre(10)); } TEST(RtpVideoFrameAssembler, RawPacketizationGenericDescriptor00Extension) { @@ -322,13 +337,15 @@ TEST(RtpVideoFrameAssembler, RawPacketizationGenericDescriptor00Extension) { ASSERT_THAT(frames, SizeIs(2)); - EXPECT_THAT(frames[0]->Id(), Eq(100)); - EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); - EXPECT_THAT(References(frames[0]), IsEmpty()); + auto first_frame = frames[0].ExtractFrame(); + EXPECT_THAT(first_frame->Id(), Eq(100)); + EXPECT_THAT(Payload(first_frame), ElementsAreArray(kPayload)); + EXPECT_THAT(References(first_frame), IsEmpty()); - EXPECT_THAT(frames[1]->Id(), Eq(102)); - EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); - EXPECT_THAT(References(frames[1]), UnorderedElementsAre(100)); + auto second_frame = frames[1].ExtractFrame(); + EXPECT_THAT(second_frame->Id(), Eq(102)); + EXPECT_THAT(Payload(second_frame), ElementsAreArray(kPayload)); + EXPECT_THAT(References(second_frame), UnorderedElementsAre(100)); } TEST(RtpVideoFrameAssembler, RawPacketizationGenericPayloadDescriptor) { @@ -356,13 +373,15 @@ TEST(RtpVideoFrameAssembler, RawPacketizationGenericPayloadDescriptor) { ASSERT_THAT(frames, SizeIs(2)); - 
EXPECT_THAT(frames[0]->Id(), Eq(123)); - EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); - EXPECT_THAT(References(frames[0]), IsEmpty()); + auto first_frame = frames[0].ExtractFrame(); + EXPECT_THAT(first_frame->Id(), Eq(123)); + EXPECT_THAT(Payload(first_frame), ElementsAreArray(kPayload)); + EXPECT_THAT(References(first_frame), IsEmpty()); - EXPECT_THAT(frames[1]->Id(), Eq(124)); - EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); - EXPECT_THAT(References(frames[1]), UnorderedElementsAre(123)); + auto second_frame = frames[1].ExtractFrame(); + EXPECT_THAT(second_frame->Id(), Eq(124)); + EXPECT_THAT(Payload(second_frame), ElementsAreArray(kPayload)); + EXPECT_THAT(References(second_frame), UnorderedElementsAre(123)); } TEST(RtpVideoFrameAssembler, Padding) { @@ -389,29 +408,18 @@ TEST(RtpVideoFrameAssembler, Padding) { frames); ASSERT_THAT(frames, SizeIs(1)); + auto first_frame = frames[0].ExtractFrame(); + EXPECT_THAT(first_frame->Id(), Eq(123)); + EXPECT_THAT(Payload(first_frame), ElementsAreArray(kPayload)); + EXPECT_THAT(References(first_frame), IsEmpty()); - EXPECT_THAT(frames[0]->Id(), Eq(123)); - EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); - EXPECT_THAT(References(frames[0]), IsEmpty()); - - // Padding packets have no bitstream data. An easy way to generate one is to - // build a normal packet and then simply remove the bitstream portion of the - // payload. - RtpPacketReceived padding_packet = PacketBuilder(PayloadFormat::kGeneric) - .WithPayload(kPayload) - .WithVideoHeader(video_header) - .WithSeqNum(124) - .Build(); - // The payload descriptor is one byte, keep it. - padding_packet.SetPayloadSize(1); - - AppendFrames(assembler.InsertPacket(padding_packet), frames); + AppendFrames(assembler.InsertPacket(PaddingPacket(/*seq_num=*/124)), frames); ASSERT_THAT(frames, SizeIs(2)); - - EXPECT_THAT(frames[1]->Id(), Eq(125)); - EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); - EXPECT_THAT(References(frames[1]), UnorderedElementsAre(123)); + auto second_frame = frames[1].ExtractFrame(); + EXPECT_THAT(second_frame->Id(), Eq(125)); + EXPECT_THAT(Payload(second_frame), ElementsAreArray(kPayload)); + EXPECT_THAT(References(second_frame), UnorderedElementsAre(123)); } TEST(RtpVideoFrameAssembler, ClearOldPackets) { @@ -464,17 +472,8 @@ TEST(RtpVideoFrameAssembler, ClearOldPacketsWithPadding) { .Build()), SizeIs(1)); - // Padding packets have no bitstream data. An easy way to generate one is to - // build a normal packet and then simply remove the bitstream portion of the - // payload. - RtpPacketReceived padding_packet = PacketBuilder(PayloadFormat::kGeneric) - .WithPayload(kPayload) - .WithVideoHeader(video_header) - .WithSeqNum(2000) - .Build(); - // The payload descriptor is one byte, keep it. 
- padding_packet.SetPayloadSize(1); - EXPECT_THAT(assembler.InsertPacket(padding_packet), SizeIs(0)); + EXPECT_THAT(assembler.InsertPacket(PaddingPacket(/*seq_num=*/2000)), + SizeIs(0)); EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) .WithPayload(kPayload) @@ -491,5 +490,94 @@ TEST(RtpVideoFrameAssembler, ClearOldPacketsWithPadding) { SizeIs(1)); } +TEST(RtpVideoFrameAssembler, SeqNumStartAndSeqNumEndSet) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kGeneric); + RtpVideoFrameAssembler::FrameVector frames; + uint8_t kPayload[] = + "Some payload that will get split into two when packetized."; + + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + RtpPacketizer::PayloadSizeLimits limits; + limits.max_payload_len = sizeof(kPayload) - 1; + + auto packetizer = + RtpPacketizer::Create(kVideoCodecGeneric, kPayload, limits, video_header); + ASSERT_THAT(packetizer->NumPackets(), Eq(2U)); + + RtpPacketReceived::ExtensionManager extension_manager; + { + RtpPacketToSend send_packet(&extension_manager); + packetizer->NextPacket(&send_packet); + send_packet.SetSequenceNumber(123); + RtpPacketReceived received_packet(&extension_manager); + received_packet.Parse(send_packet.Buffer()); + assembler.InsertPacket(received_packet); + } + + { + RtpPacketToSend send_packet(&extension_manager); + packetizer->NextPacket(&send_packet); + send_packet.SetSequenceNumber(124); + RtpPacketReceived received_packet(&extension_manager); + received_packet.Parse(send_packet.Buffer()); + AppendFrames(assembler.InsertPacket(received_packet), frames); + } + + ASSERT_THAT(frames, SizeIs(1)); + EXPECT_THAT(frames[0].RtpSeqNumStart(), Eq(123)); + EXPECT_THAT(frames[0].RtpSeqNumEnd(), Eq(124)); +} + +TEST(RtpVideoFrameAssembler, SeqNumStartAndSeqNumEndSetWhenPaddingReceived) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kGeneric); + RtpVideoFrameAssembler::FrameVector frames; + uint8_t kPayload[] = + "Some payload that will get split into two when packetized."; + + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(121) + .Build()), + SizeIs(1)); + + video_header.frame_type = VideoFrameType::kVideoFrameDelta; + RtpPacketReceived::ExtensionManager extension_manager; + RtpPacketizer::PayloadSizeLimits limits; + limits.max_payload_len = sizeof(kPayload) - 1; + + auto packetizer = + RtpPacketizer::Create(kVideoCodecGeneric, kPayload, limits, video_header); + ASSERT_THAT(packetizer->NumPackets(), Eq(2U)); + + { + RtpPacketToSend send_packet(&extension_manager); + packetizer->NextPacket(&send_packet); + send_packet.SetSequenceNumber(123); + RtpPacketReceived received_packet(&extension_manager); + received_packet.Parse(send_packet.Buffer()); + assembler.InsertPacket(received_packet); + } + + { + RtpPacketToSend send_packet(&extension_manager); + packetizer->NextPacket(&send_packet); + send_packet.SetSequenceNumber(124); + RtpPacketReceived received_packet(&extension_manager); + received_packet.Parse(send_packet.Buffer()); + assembler.InsertPacket(received_packet); + } + + AppendFrames(assembler.InsertPacket(PaddingPacket(/*seq_num=*/122)), frames); + + ASSERT_THAT(frames, SizeIs(1)); + EXPECT_THAT(frames[0].RtpSeqNumStart(), Eq(123)); + EXPECT_THAT(frames[0].RtpSeqNumEnd(), Eq(124)); +} + } // namespace } // namespace webrtc diff --git 
a/api/video/test/BUILD.gn b/api/video/test/BUILD.gn index 1573e7848f..5b0d57b3c6 100644 --- a/api/video/test/BUILD.gn +++ b/api/video/test/BUILD.gn @@ -12,6 +12,7 @@ rtc_library("rtc_api_video_unittests") { testonly = true sources = [ "color_space_unittest.cc", + "i444_buffer_unittest.cc", "nv12_buffer_unittest.cc", "video_adaptation_counters_unittest.cc", "video_bitrate_allocation_unittest.cc", diff --git a/api/video/test/i444_buffer_unittest.cc b/api/video/test/i444_buffer_unittest.cc new file mode 100644 index 0000000000..9a1a9315aa --- /dev/null +++ b/api/video/test/i444_buffer_unittest.cc @@ -0,0 +1,112 @@ + +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "api/video/i444_buffer.h" + +#include "api/video/i420_buffer.h" +#include "test/frame_utils.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { + +namespace { +int GetY(rtc::scoped_refptr<I444BufferInterface> buf, int col, int row) { + return buf->DataY()[row * buf->StrideY() + col]; +} + +int GetU(rtc::scoped_refptr<I444BufferInterface> buf, int col, int row) { + return buf->DataU()[row * buf->StrideU() + col]; +} + +int GetV(rtc::scoped_refptr<I444BufferInterface> buf, int col, int row) { + return buf->DataV()[row * buf->StrideV() + col]; +} + +void FillI444Buffer(rtc::scoped_refptr<I444Buffer> buf) { + const uint8_t Y = 1; + const uint8_t U = 2; + const uint8_t V = 3; + for (int row = 0; row < buf->height(); ++row) { + for (int col = 0; col < buf->width(); ++col) { + buf->MutableDataY()[row * buf->StrideY() + col] = Y; + buf->MutableDataU()[row * buf->StrideU() + col] = U; + buf->MutableDataV()[row * buf->StrideV() + col] = V; + } + } +} + +} // namespace + +TEST(I444BufferTest, InitialData) { + constexpr int stride = 3; + constexpr int width = 3; + constexpr int height = 3; + + rtc::scoped_refptr<I444Buffer> i444_buffer(I444Buffer::Create(width, height)); + EXPECT_EQ(width, i444_buffer->width()); + EXPECT_EQ(height, i444_buffer->height()); + EXPECT_EQ(stride, i444_buffer->StrideY()); + EXPECT_EQ(stride, i444_buffer->StrideU()); + EXPECT_EQ(stride, i444_buffer->StrideV()); + EXPECT_EQ(3, i444_buffer->ChromaWidth()); + EXPECT_EQ(3, i444_buffer->ChromaHeight()); +} + +TEST(I444BufferTest, ReadPixels) { + constexpr int width = 3; + constexpr int height = 3; + + rtc::scoped_refptr<I444Buffer> i444_buffer(I444Buffer::Create(width, height)); + // Y = 1, U = 2, V = 3. + FillI444Buffer(i444_buffer); + for (int row = 0; row < height; row++) { + for (int col = 0; col < width; col++) { + EXPECT_EQ(1, GetY(i444_buffer, col, row)); + EXPECT_EQ(2, GetU(i444_buffer, col, row)); + EXPECT_EQ(3, GetV(i444_buffer, col, row)); + } + } +} + +TEST(I444BufferTest, ToI420) { + constexpr int width = 3; + constexpr int height = 3; + constexpr int size_y = width * height; + constexpr int size_u = (width + 1) / 2 * (height + 1) / 2; + constexpr int size_v = (width + 1) / 2 * (height + 1) / 2; + rtc::scoped_refptr<I420Buffer> reference(I420Buffer::Create(width, height)); + memset(reference->MutableDataY(), 8, size_y); + memset(reference->MutableDataU(), 4, size_u); + memset(reference->MutableDataV(), 2, size_v); + + rtc::scoped_refptr<I444Buffer> i444_buffer(I444Buffer::Create(width, height)); + // Fill the I444 buffer with the same values; its chroma planes are full + // resolution, hence size_y bytes for each of the three planes.
+ memset(i444_buffer->MutableDataY(), 8, size_y); + memset(i444_buffer->MutableDataU(), 4, size_y); + memset(i444_buffer->MutableDataV(), 2, size_y); + + // Confirm YUV values are as expected. + for (int row = 0; row < height; row++) { + for (int col = 0; col < width; col++) { + EXPECT_EQ(8, GetY(i444_buffer, col, row)); + EXPECT_EQ(4, GetU(i444_buffer, col, row)); + EXPECT_EQ(2, GetV(i444_buffer, col, row)); + } + } + + rtc::scoped_refptr i420_buffer(i444_buffer->ToI420()); + EXPECT_EQ(height, i420_buffer->height()); + EXPECT_EQ(width, i420_buffer->width()); + EXPECT_TRUE(test::FrameBufsEqual(reference, i420_buffer)); +} + +} // namespace webrtc diff --git a/api/video/video_bitrate_allocation.h b/api/video/video_bitrate_allocation.h index 56c0f64da3..4feffa2e66 100644 --- a/api/video/video_bitrate_allocation.h +++ b/api/video/video_bitrate_allocation.h @@ -50,8 +50,8 @@ class RTC_EXPORT VideoBitrateAllocation { // Get the sum of all the temporal layer for a specific spatial layer. uint32_t GetSpatialLayerSum(size_t spatial_index) const; - // Sum of bitrates of temporal layers, from layer 0 to |temporal_index| - // inclusive, of specified spatial layer |spatial_index|. Bitrates of lower + // Sum of bitrates of temporal layers, from layer 0 to `temporal_index` + // inclusive, of specified spatial layer `spatial_index`. Bitrates of lower // spatial layers are not included. uint32_t GetTemporalLayerSum(size_t spatial_index, size_t temporal_index) const; diff --git a/api/video/video_codec_type.h b/api/video/video_codec_type.h index 12dcfac1b9..74a4bc4258 100644 --- a/api/video/video_codec_type.h +++ b/api/video/video_codec_type.h @@ -13,7 +13,6 @@ namespace webrtc { -// GENERATED_JAVA_ENUM_PACKAGE: org.webrtc enum VideoCodecType { // There are various memset(..., 0, ...) calls in the code that rely on // kVideoCodecGeneric being zero. diff --git a/api/video/video_frame.h b/api/video/video_frame.h index e073fd5e42..512055d770 100644 --- a/api/video/video_frame.h +++ b/api/video/video_frame.h @@ -272,7 +272,7 @@ class RTC_EXPORT VideoFrame { // update_rect() will return a rectangle corresponding to the entire frame. absl::optional update_rect_; // Information about packets used to assemble this video frame. This is needed - // by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's + // by `SourceTracker` when the frame is delivered to the RTCRtpReceiver's // MediaStreamTrack, in order to implement getContributingSources(). 
See: // https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources RtpPacketInfos packet_infos_; diff --git a/api/video/video_frame_buffer.cc b/api/video/video_frame_buffer.cc index 7085010325..6c46f782a0 100644 --- a/api/video/video_frame_buffer.cc +++ b/api/video/video_frame_buffer.cc @@ -11,6 +11,7 @@ #include "api/video/video_frame_buffer.h" #include "api/video/i420_buffer.h" +#include "api/video/i444_buffer.h" #include "api/video/nv12_buffer.h" #include "rtc_base/checks.h" @@ -81,7 +82,7 @@ const char* VideoFrameBufferTypeToString(VideoFrameBuffer::Type type) { case VideoFrameBuffer::Type::kNV12: return "kNV12"; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } } @@ -94,7 +95,7 @@ int I420BufferInterface::ChromaHeight() const { } rtc::scoped_refptr<I420BufferInterface> I420BufferInterface::ToI420() { - return this; + return rtc::scoped_refptr<I420BufferInterface>(this); } const I420BufferInterface* I420BufferInterface::GetI420() const { @@ -117,6 +118,19 @@ int I444BufferInterface::ChromaHeight() const { return height(); } +rtc::scoped_refptr<VideoFrameBuffer> I444BufferInterface::CropAndScale( + int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height) { + rtc::scoped_refptr<I444Buffer> result = + I444Buffer::Create(scaled_width, scaled_height); + result->CropAndScaleFrom(*this, offset_x, offset_y, crop_width, crop_height); + return result; +} + VideoFrameBuffer::Type I010BufferInterface::type() const { return Type::kI010; } diff --git a/api/video/video_frame_buffer.h b/api/video/video_frame_buffer.h index 62adc204f6..6098a48117 100644 --- a/api/video/video_frame_buffer.h +++ b/api/video/video_frame_buffer.h @@ -46,6 +46,8 @@ class RTC_EXPORT VideoFrameBuffer : public rtc::RefCountInterface { // New frame buffer types will be added conservatively when there is an // opportunity to optimize the path between some pair of video source and // video sink. + // GENERATED_JAVA_ENUM_PACKAGE: org.webrtc + // GENERATED_JAVA_CLASS_NAME_OVERRIDE: VideoFrameBufferType enum class Type { kNative, kI420, @@ -67,6 +69,8 @@ class RTC_EXPORT VideoFrameBuffer : public rtc::RefCountInterface { // in another format, a conversion will take place. All implementations must // provide a fallback to I420 for compatibility with e.g. the internal WebRTC // software encoders. + // Conversion may fail, for example if reading the pixel data from a texture + // fails. If the conversion fails, nullptr is returned. virtual rtc::scoped_refptr<I420BufferInterface> ToI420() = 0; // GetI420() methods should return I420 buffer if conversion is trivial, i.e @@ -82,8 +86,8 @@ class RTC_EXPORT VideoFrameBuffer : public rtc::RefCountInterface { // A format specific scale function. Default implementation works by // converting to I420. But more efficient implementations may override it, // especially for kNative. - // First, the image is cropped to |crop_width| and |crop_height| and then - // scaled to |scaled_width| and |scaled_height|. + // First, the image is cropped to `crop_width` and `crop_height` and then + // scaled to `scaled_width` and `scaled_height`.
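A usage sketch of this crop-and-scale API (the input buffer and the 1280x720 geometry are illustrative; Thumbnail() is a hypothetical helper):

    // Sketch: crop a centered 4:3 region from a 1280x720 buffer, then scale.
    rtc::scoped_refptr<webrtc::VideoFrameBuffer> Thumbnail(
        rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer) {
      return buffer->CropAndScale(/*offset_x=*/160, /*offset_y=*/0,
                                  /*crop_width=*/960, /*crop_height=*/720,
                                  /*scaled_width=*/320, /*scaled_height=*/240);
    }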
virtual rtc::scoped_refptr<VideoFrameBuffer> CropAndScale(int offset_x, int offset_y, int crop_width, @@ -180,6 +184,13 @@ class I444BufferInterface : public PlanarYuv8Buffer { int ChromaWidth() const final; int ChromaHeight() const final; + rtc::scoped_refptr<VideoFrameBuffer> CropAndScale(int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height) override; + protected: ~I444BufferInterface() override {} }; diff --git a/api/video/video_sink_interface.h b/api/video/video_sink_interface.h index 88cf9d924f..9c1f5f3214 100644 --- a/api/video/video_sink_interface.h +++ b/api/video/video_sink_interface.h @@ -11,6 +11,8 @@ #ifndef API_VIDEO_VIDEO_SINK_INTERFACE_H_ #define API_VIDEO_VIDEO_SINK_INTERFACE_H_ +#include "absl/types/optional.h" +#include "api/video_track_source_constraints.h" #include "rtc_base/checks.h" namespace rtc { @@ -25,6 +27,11 @@ class VideoSinkInterface { // Should be called by the source when it discards the frame due to rate // limiting. virtual void OnDiscardedFrame() {} + + // Called on the network thread when video constraints change. + // TODO(crbug/1255737): make pure virtual once downstream project adapts. + virtual void OnConstraintsChanged( + const webrtc::VideoTrackSourceConstraints& constraints) {} }; } // namespace rtc diff --git a/api/video/video_source_interface.h b/api/video/video_source_interface.h index 8b5823fc27..5eb4ebfd75 100644 --- a/api/video/video_source_interface.h +++ b/api/video/video_source_interface.h @@ -54,7 +54,7 @@ struct RTC_EXPORT VideoSinkWants { int max_framerate_fps = std::numeric_limits<int>::max(); // Tells the source that the sink wants width and height of the video frames - // to be divisible by |resolution_alignment|. + // to be divisible by `resolution_alignment`. // For example: With I420, this value would be a multiple of 2. // Note that this field is unrelated to any horizontal or vertical stride // requirements the encoder has on the incoming video frame buffers. @@ -71,13 +71,13 @@ struct RTC_EXPORT VideoSinkWants { // to scaleResolutionDownBy or turning off simulcast or SVC layers. // // For example, we may capture at 720p and due to adaptation (e.g. applying - // |max_pixel_count| constraints) create webrtc::VideoFrames of size 480p, but + // `max_pixel_count` constraints) create webrtc::VideoFrames of size 480p, but // if we do scaleResolutionDownBy:2 then the only resolution we end up // encoding is 240p. In this case we still need to provide webrtc::VideoFrames // of size 480p but we can optimize internal buffers for 240p, avoiding // downsampling to 480p if possible. // - // Note that the |resolutions| can change while frames are in flight and + // Note that the `resolutions` can change while frames are in flight and // should only be used as a hint when constructing the webrtc::VideoFrame. std::vector<FrameSize> resolutions; }; @@ -97,6 +97,10 @@ class VideoSourceInterface { // RemoveSink must guarantee that at the time the method returns, // there is no current and no future calls to VideoSinkInterface::OnFrame. virtual void RemoveSink(VideoSinkInterface<VideoFrameT>* sink) = 0; + + // Request underlying source to capture a new frame. + // TODO(crbug/1255737): make pure virtual once downstream projects adapt.
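A sketch of a source overriding the RequestRefreshFrame() hook declared just below (the frame cache and single-sink bookkeeping are assumptions for illustration; real sources typically fan out through a broadcaster):

    // Sketch: redeliver the most recent frame when a refresh is requested,
    // e.g. so a newly added sink gets an image immediately.
    class RefreshableSource : public rtc::VideoSourceInterface<webrtc::VideoFrame> {
     public:
      void AddOrUpdateSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
                           const rtc::VideoSinkWants& wants) override {
        sink_ = sink;  // Single-sink simplification.
      }
      void RemoveSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override {
        if (sink_ == sink) sink_ = nullptr;
      }
      void RequestRefreshFrame() override {
        if (sink_ && last_frame_)
          sink_->OnFrame(*last_frame_);  // Resend the cached frame.
      }
     private:
      absl::optional<webrtc::VideoFrame> last_frame_;
      rtc::VideoSinkInterface<webrtc::VideoFrame>* sink_ = nullptr;
    };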
+ virtual void RequestRefreshFrame() {} }; } // namespace rtc diff --git a/api/video/video_stream_decoder_create.h b/api/video/video_stream_decoder_create.h index 4958dc1084..9c898ec610 100644 --- a/api/video/video_stream_decoder_create.h +++ b/api/video/video_stream_decoder_create.h @@ -20,7 +20,7 @@ #include "api/video_codecs/sdp_video_format.h" namespace webrtc { -// The |decoder_settings| parameter is a map between: +// The `decoder_settings` parameter is a map between: // --> < @@ -30,4 +30,4 @@ - \ No newline at end of file + diff --git a/examples/aarproject/app/build.gradle b/examples/aarproject/app/build.gradle index b4f2acdbdd..d47dedb3ca 100644 --- a/examples/aarproject/app/build.gradle +++ b/examples/aarproject/app/build.gradle @@ -42,10 +42,11 @@ android { } dependencies { - implementation fileTree(dir: 'libs', include: ['*.jar']) + if (project.hasProperty('aarDir')) { + implementation fileTree(dir: project.aarDir, include: ['google-webrtc-*.aar']) + } implementation fileTree(dir: '../../androidapp/third_party/autobanh/lib', include: ['autobanh.jar']) - implementation 'com.android.support:appcompat-v7:26.1.0' - implementation 'org.webrtc:google-webrtc:1.0.+' + implementation 'androidx.annotation:annotation:1.2.0' testImplementation 'junit:junit:4.12' androidTestImplementation 'com.android.support.test:runner:1.0.1' androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.1' diff --git a/examples/aarproject/gradle.properties b/examples/aarproject/gradle.properties index aac7c9b461..2e87c52f83 100644 --- a/examples/aarproject/gradle.properties +++ b/examples/aarproject/gradle.properties @@ -15,3 +15,8 @@ org.gradle.jvmargs=-Xmx1536m # This option should only be used with decoupled projects. More details, visit # http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects # org.gradle.parallel=true + +# AndroidX package structure to make it clearer which packages are bundled with the +# Android operating system, and which are packaged with your app's APK +# https://developer.android.com/topic/libraries/support-library/androidx-rn +android.useAndroidX=true diff --git a/examples/androidapp/res/layout/fragment_hud.xml b/examples/androidapp/res/layout/fragment_hud.xml index 20d26549bd..483e7ba456 100644 --- a/examples/androidapp/res/layout/fragment_hud.xml +++ b/examples/androidapp/res/layout/fragment_hud.xml @@ -15,7 +15,7 @@ android:layout_height="48dp"/> - - - - - - - - - - - - - - - - diff --git a/examples/androidapp/src/org/appspot/apprtc/AppRTCAudioManager.java b/examples/androidapp/src/org/appspot/apprtc/AppRTCAudioManager.java index c32ab964ad..2536b131a1 100644 --- a/examples/androidapp/src/org/appspot/apprtc/AppRTCAudioManager.java +++ b/examples/androidapp/src/org/appspot/apprtc/AppRTCAudioManager.java @@ -20,8 +20,8 @@ import android.media.AudioManager; import android.os.Build; import android.preference.PreferenceManager; -import android.support.annotation.Nullable; import android.util.Log; +import androidx.annotation.Nullable; import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -77,7 +77,7 @@ void onAudioDeviceChanged( // This device is changed automatically using a certain scheme where e.g. // a wired headset "wins" over speaker phone. It is also possible for a // user to explicitly select a device (and overrid any predefined scheme). - // See |userSelectedAudioDevice| for details. + // See `userSelectedAudioDevice` for details. 
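  // Sketch of that precedence (assuming a helper like hasWiredHeadset()):
  //   private AudioDevice resolveAudioDevice() {
  //     if (userSelectedAudioDevice != AudioDevice.NONE) {
  //       return userSelectedAudioDevice; // explicit user choice wins
  //     }
  //     return hasWiredHeadset() ? AudioDevice.WIRED_HEADSET : defaultAudioDevice;
  //   }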
private AudioDevice selectedAudioDevice; // Contains the user-selected audio device which overrides the predefined @@ -216,7 +216,7 @@ public void start(AudioManagerEvents audioManagerEvents) { // Create an AudioManager.OnAudioFocusChangeListener instance. audioFocusChangeListener = new AudioManager.OnAudioFocusChangeListener() { // Called on the listener to notify if the audio focus for this listener has been changed. - // The |focusChange| value indicates whether the focus was gained, whether the focus was lost, + // The `focusChange` value indicates whether the focus was gained, whether the focus was lost, // and whether that loss is transient, or whether the new focus holder will hold it for an // unknown amount of time. // TODO(henrika): possibly extend support of handling audio-focus changes. Only contains @@ -573,7 +573,7 @@ public void updateAudioDeviceState() { } else { // No wired headset and no Bluetooth, hence the audio-device list can contain speaker // phone (on a tablet), or speaker phone and earpiece (on mobile phone). - // |defaultAudioDevice| contains either AudioDevice.SPEAKER_PHONE or AudioDevice.EARPIECE + // `defaultAudioDevice` contains either AudioDevice.SPEAKER_PHONE or AudioDevice.EARPIECE // depending on the user's selection. newAudioDevice = defaultAudioDevice; } diff --git a/examples/androidapp/src/org/appspot/apprtc/AppRTCBluetoothManager.java b/examples/androidapp/src/org/appspot/apprtc/AppRTCBluetoothManager.java index 936b5495d5..e9077d8bd6 100644 --- a/examples/androidapp/src/org/appspot/apprtc/AppRTCBluetoothManager.java +++ b/examples/androidapp/src/org/appspot/apprtc/AppRTCBluetoothManager.java @@ -24,8 +24,8 @@ import android.os.Handler; import android.os.Looper; import android.os.Process; -import android.support.annotation.Nullable; import android.util.Log; +import androidx.annotation.Nullable; import java.util.List; import java.util.Set; import org.appspot.apprtc.util.AppRTCUtils; @@ -364,7 +364,7 @@ public void stopScoAudio() { * Use the BluetoothHeadset proxy object (controls the Bluetooth Headset * Service via IPC) to update the list of connected devices for the HEADSET * profile. The internal state will change to HEADSET_UNAVAILABLE or to - * HEADSET_AVAILABLE and |bluetoothDevice| will be mapped to the connected + * HEADSET_AVAILABLE and `bluetoothDevice` will be mapped to the connected * device if available. */ public void updateDevice() { diff --git a/examples/androidapp/src/org/appspot/apprtc/AppRTCClient.java b/examples/androidapp/src/org/appspot/apprtc/AppRTCClient.java index 5487fbabf8..d5b7b4338e 100644 --- a/examples/androidapp/src/org/appspot/apprtc/AppRTCClient.java +++ b/examples/androidapp/src/org/appspot/apprtc/AppRTCClient.java @@ -100,7 +100,7 @@ public SignalingParameters(List iceServers, boolean in /** * Callback interface for messages delivered on signaling channel. * - *
<p>Methods are guaranteed to be invoked on the UI thread of |activity|.
+ * <p>
Methods are guaranteed to be invoked on the UI thread of `activity`. */ interface SignalingEvents { /** diff --git a/examples/androidapp/src/org/appspot/apprtc/AppRTCProximitySensor.java b/examples/androidapp/src/org/appspot/apprtc/AppRTCProximitySensor.java index a29b7ef28e..5c73b4395c 100644 --- a/examples/androidapp/src/org/appspot/apprtc/AppRTCProximitySensor.java +++ b/examples/androidapp/src/org/appspot/apprtc/AppRTCProximitySensor.java @@ -16,8 +16,8 @@ import android.hardware.SensorEventListener; import android.hardware.SensorManager; import android.os.Build; -import android.support.annotation.Nullable; import android.util.Log; +import androidx.annotation.Nullable; import org.appspot.apprtc.util.AppRTCUtils; import org.webrtc.ThreadUtils; @@ -34,8 +34,8 @@ public class AppRTCProximitySensor implements SensorEventListener { private static final String TAG = "AppRTCProximitySensor"; // This class should be created, started and stopped on one thread - // (e.g. the main thread). We use |nonThreadSafe| to ensure that this is - // the case. Only active when |DEBUG| is set to true. + // (e.g. the main thread). We use `nonThreadSafe` to ensure that this is + // the case. Only active when `DEBUG` is set to true. private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker(); private final Runnable onSensorStateListener; diff --git a/examples/androidapp/src/org/appspot/apprtc/CallActivity.java b/examples/androidapp/src/org/appspot/apprtc/CallActivity.java index 10d2b6eca2..2da2073e2b 100644 --- a/examples/androidapp/src/org/appspot/apprtc/CallActivity.java +++ b/examples/androidapp/src/org/appspot/apprtc/CallActivity.java @@ -24,7 +24,6 @@ import android.os.Build; import android.os.Bundle; import android.os.Handler; -import android.support.annotation.Nullable; import android.util.DisplayMetrics; import android.util.Log; import android.view.View; @@ -32,6 +31,7 @@ import android.view.WindowManager; import android.view.WindowManager.LayoutParams; import android.widget.Toast; +import androidx.annotation.Nullable; import java.io.IOException; import java.lang.RuntimeException; import java.util.ArrayList; @@ -51,10 +51,10 @@ import org.webrtc.IceCandidate; import org.webrtc.Logging; import org.webrtc.PeerConnectionFactory; +import org.webrtc.RTCStatsReport; import org.webrtc.RendererCommon.ScalingType; import org.webrtc.ScreenCapturerAndroid; import org.webrtc.SessionDescription; -import org.webrtc.StatsReport; import org.webrtc.SurfaceViewRenderer; import org.webrtc.VideoCapturer; import org.webrtc.VideoFileRenderer; @@ -676,7 +676,7 @@ public void onClick(DialogInterface dialog, int id) { } } - // Log |msg| and Toast about it. + // Log `msg` and Toast about it. 
private void logAndToast(String msg) { Log.d(TAG, msg); if (logToast != null) { @@ -951,12 +951,12 @@ public void run() { public void onPeerConnectionClosed() {} @Override - public void onPeerConnectionStatsReady(final StatsReport[] reports) { + public void onPeerConnectionStatsReady(final RTCStatsReport report) { runOnUiThread(new Runnable() { @Override public void run() { if (!isError && connected) { - hudFragment.updateEncoderStatistics(reports); + hudFragment.updateEncoderStatistics(report); } } }); diff --git a/examples/androidapp/src/org/appspot/apprtc/ConnectActivity.java b/examples/androidapp/src/org/appspot/apprtc/ConnectActivity.java index f282fc89b2..7206c88498 100644 --- a/examples/androidapp/src/org/appspot/apprtc/ConnectActivity.java +++ b/examples/androidapp/src/org/appspot/apprtc/ConnectActivity.java @@ -22,7 +22,6 @@ import android.os.Build; import android.os.Bundle; import android.preference.PreferenceManager; -import android.support.annotation.Nullable; import android.util.Log; import android.view.ContextMenu; import android.view.KeyEvent; @@ -38,6 +37,7 @@ import android.widget.ImageButton; import android.widget.ListView; import android.widget.TextView; +import androidx.annotation.Nullable; import java.util.ArrayList; import java.util.Random; import org.json.JSONArray; diff --git a/examples/androidapp/src/org/appspot/apprtc/CpuMonitor.java b/examples/androidapp/src/org/appspot/apprtc/CpuMonitor.java index 001728bbc1..dd51ab2561 100644 --- a/examples/androidapp/src/org/appspot/apprtc/CpuMonitor.java +++ b/examples/androidapp/src/org/appspot/apprtc/CpuMonitor.java @@ -17,8 +17,8 @@ import android.os.BatteryManager; import android.os.Build; import android.os.SystemClock; -import android.support.annotation.Nullable; import android.util.Log; +import androidx.annotation.Nullable; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.FileNotFoundException; diff --git a/examples/androidapp/src/org/appspot/apprtc/DirectRTCClient.java b/examples/androidapp/src/org/appspot/apprtc/DirectRTCClient.java index 05b6edbff2..1b113e1398 100644 --- a/examples/androidapp/src/org/appspot/apprtc/DirectRTCClient.java +++ b/examples/androidapp/src/org/appspot/apprtc/DirectRTCClient.java @@ -10,8 +10,8 @@ package org.appspot.apprtc; -import android.support.annotation.Nullable; import android.util.Log; +import androidx.annotation.Nullable; import java.util.ArrayList; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -320,7 +320,7 @@ public void run() { }); } - // Put a |key|->|value| mapping in |json|. + // Put a `key`->`value` mapping in `json`. private static void jsonPut(JSONObject json, String key, Object value) { try { json.put(key, value); diff --git a/examples/androidapp/src/org/appspot/apprtc/HudFragment.java b/examples/androidapp/src/org/appspot/apprtc/HudFragment.java index ad6b37dfa5..94ca05549a 100644 --- a/examples/androidapp/src/org/appspot/apprtc/HudFragment.java +++ b/examples/androidapp/src/org/appspot/apprtc/HudFragment.java @@ -12,29 +12,20 @@ import android.app.Fragment; import android.os.Bundle; -import android.util.TypedValue; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageButton; import android.widget.TextView; - -import org.webrtc.StatsReport; - -import java.util.HashMap; -import java.util.Map; +import org.webrtc.RTCStats; +import org.webrtc.RTCStatsReport; /** * Fragment for HUD statistics display. 
*/ public class HudFragment extends Fragment { - private TextView encoderStatView; - private TextView hudViewBwe; - private TextView hudViewConnection; - private TextView hudViewVideoSend; - private TextView hudViewVideoRecv; + private TextView statView; private ImageButton toggleDebugButton; - private boolean videoCallEnabled; private boolean displayHud; private volatile boolean isRunning; private CpuMonitor cpuMonitor; @@ -45,20 +36,15 @@ public View onCreateView( View controlView = inflater.inflate(R.layout.fragment_hud, container, false); // Create UI controls. - encoderStatView = controlView.findViewById(R.id.encoder_stat_call); - hudViewBwe = controlView.findViewById(R.id.hud_stat_bwe); - hudViewConnection = controlView.findViewById(R.id.hud_stat_connection); - hudViewVideoSend = controlView.findViewById(R.id.hud_stat_video_send); - hudViewVideoRecv = controlView.findViewById(R.id.hud_stat_video_recv); + statView = controlView.findViewById(R.id.hud_stat_call); toggleDebugButton = controlView.findViewById(R.id.button_toggle_debug); toggleDebugButton.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { if (displayHud) { - int visibility = - (hudViewBwe.getVisibility() == View.VISIBLE) ? View.INVISIBLE : View.VISIBLE; - hudViewsSetProperties(visibility); + statView.setVisibility( + statView.getVisibility() == View.VISIBLE ? View.INVISIBLE : View.VISIBLE); } } }); @@ -72,13 +58,11 @@ public void onStart() { Bundle args = getArguments(); if (args != null) { - videoCallEnabled = args.getBoolean(CallActivity.EXTRA_VIDEO_CALL, true); displayHud = args.getBoolean(CallActivity.EXTRA_DISPLAY_HUD, false); } int visibility = displayHud ? View.VISIBLE : View.INVISIBLE; - encoderStatView.setVisibility(visibility); + statView.setVisibility(View.INVISIBLE); toggleDebugButton.setVisibility(visibility); - hudViewsSetProperties(View.INVISIBLE); isRunning = true; } @@ -92,113 +76,27 @@ public void setCpuMonitor(CpuMonitor cpuMonitor) { this.cpuMonitor = cpuMonitor; } - private void hudViewsSetProperties(int visibility) { - hudViewBwe.setVisibility(visibility); - hudViewConnection.setVisibility(visibility); - hudViewVideoSend.setVisibility(visibility); - hudViewVideoRecv.setVisibility(visibility); - hudViewBwe.setTextSize(TypedValue.COMPLEX_UNIT_PT, 5); - hudViewConnection.setTextSize(TypedValue.COMPLEX_UNIT_PT, 5); - hudViewVideoSend.setTextSize(TypedValue.COMPLEX_UNIT_PT, 5); - hudViewVideoRecv.setTextSize(TypedValue.COMPLEX_UNIT_PT, 5); - } - - private Map getReportMap(StatsReport report) { - Map reportMap = new HashMap<>(); - for (StatsReport.Value value : report.values) { - reportMap.put(value.name, value.value); - } - return reportMap; - } - - public void updateEncoderStatistics(final StatsReport[] reports) { + public void updateEncoderStatistics(final RTCStatsReport report) { if (!isRunning || !displayHud) { return; } - StringBuilder encoderStat = new StringBuilder(128); - StringBuilder bweStat = new StringBuilder(); - StringBuilder connectionStat = new StringBuilder(); - StringBuilder videoSendStat = new StringBuilder(); - StringBuilder videoRecvStat = new StringBuilder(); - String fps = null; - String targetBitrate = null; - String actualBitrate = null; - - for (StatsReport report : reports) { - if (report.type.equals("ssrc") && report.id.contains("ssrc") && report.id.contains("send")) { - // Send video statistics. 
- Map reportMap = getReportMap(report); - String trackId = reportMap.get("googTrackId"); - if (trackId != null && trackId.contains(PeerConnectionClient.VIDEO_TRACK_ID)) { - fps = reportMap.get("googFrameRateSent"); - videoSendStat.append(report.id).append("\n"); - for (StatsReport.Value value : report.values) { - String name = value.name.replace("goog", ""); - videoSendStat.append(name).append("=").append(value.value).append("\n"); - } - } - } else if (report.type.equals("ssrc") && report.id.contains("ssrc") - && report.id.contains("recv")) { - // Receive video statistics. - Map reportMap = getReportMap(report); - // Check if this stat is for video track. - String frameWidth = reportMap.get("googFrameWidthReceived"); - if (frameWidth != null) { - videoRecvStat.append(report.id).append("\n"); - for (StatsReport.Value value : report.values) { - String name = value.name.replace("goog", ""); - videoRecvStat.append(name).append("=").append(value.value).append("\n"); - } - } - } else if (report.id.equals("bweforvideo")) { - // BWE statistics. - Map reportMap = getReportMap(report); - targetBitrate = reportMap.get("googTargetEncBitrate"); - actualBitrate = reportMap.get("googActualEncBitrate"); - - bweStat.append(report.id).append("\n"); - for (StatsReport.Value value : report.values) { - String name = value.name.replace("goog", "").replace("Available", ""); - bweStat.append(name).append("=").append(value.value).append("\n"); - } - } else if (report.type.equals("googCandidatePair")) { - // Connection statistics. - Map reportMap = getReportMap(report); - String activeConnection = reportMap.get("googActiveConnection"); - if (activeConnection != null && activeConnection.equals("true")) { - connectionStat.append(report.id).append("\n"); - for (StatsReport.Value value : report.values) { - String name = value.name.replace("goog", ""); - connectionStat.append(name).append("=").append(value.value).append("\n"); - } - } - } - } - hudViewBwe.setText(bweStat.toString()); - hudViewConnection.setText(connectionStat.toString()); - hudViewVideoSend.setText(videoSendStat.toString()); - hudViewVideoRecv.setText(videoRecvStat.toString()); - - if (videoCallEnabled) { - if (fps != null) { - encoderStat.append("Fps: ").append(fps).append("\n"); - } - if (targetBitrate != null) { - encoderStat.append("Target BR: ").append(targetBitrate).append("\n"); - } - if (actualBitrate != null) { - encoderStat.append("Actual BR: ").append(actualBitrate).append("\n"); - } - } + + StringBuilder sb = new StringBuilder(); if (cpuMonitor != null) { - encoderStat.append("CPU%: ") + sb.append("CPU%: ") .append(cpuMonitor.getCpuUsageCurrent()) .append("/") .append(cpuMonitor.getCpuUsageAverage()) .append(". 
Freq: ") - .append(cpuMonitor.getFrequencyScaleAverage()); + .append(cpuMonitor.getFrequencyScaleAverage()) + .append("\n"); } - encoderStatView.setText(encoderStat.toString()); + + for (RTCStats stat : report.getStatsMap().values()) { + sb.append(stat.toString()).append("\n"); + } + + statView.setText(sb.toString()); } } diff --git a/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java b/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java index b3282a6955..7bdce00b2f 100644 --- a/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java +++ b/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java @@ -13,8 +13,8 @@ import android.content.Context; import android.os.Environment; import android.os.ParcelFileDescriptor; -import android.support.annotation.Nullable; import android.util.Log; +import androidx.annotation.Nullable; import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; @@ -46,6 +46,7 @@ import org.webrtc.DefaultVideoEncoderFactory; import org.webrtc.EglBase; import org.webrtc.IceCandidate; +import org.webrtc.IceCandidateErrorEvent; import org.webrtc.Logging; import org.webrtc.MediaConstraints; import org.webrtc.MediaStream; @@ -54,6 +55,8 @@ import org.webrtc.PeerConnection.IceConnectionState; import org.webrtc.PeerConnection.PeerConnectionState; import org.webrtc.PeerConnectionFactory; +import org.webrtc.RTCStatsCollectorCallback; +import org.webrtc.RTCStatsReport; import org.webrtc.RtpParameters; import org.webrtc.RtpReceiver; import org.webrtc.RtpSender; @@ -62,8 +65,6 @@ import org.webrtc.SessionDescription; import org.webrtc.SoftwareVideoDecoderFactory; import org.webrtc.SoftwareVideoEncoderFactory; -import org.webrtc.StatsObserver; -import org.webrtc.StatsReport; import org.webrtc.SurfaceTextureHelper; import org.webrtc.VideoCapturer; import org.webrtc.VideoDecoderFactory; @@ -96,7 +97,6 @@ public class PeerConnectionClient { private static final String VIDEO_CODEC_H264_BASELINE = "H264 Baseline"; private static final String VIDEO_CODEC_H264_HIGH = "H264 High"; private static final String VIDEO_CODEC_AV1 = "AV1"; - private static final String VIDEO_CODEC_AV1_SDP_CODEC_NAME = "AV1X"; private static final String AUDIO_CODEC_OPUS = "opus"; private static final String AUDIO_CODEC_ISAC = "ISAC"; private static final String VIDEO_CODEC_PARAM_START_BITRATE = "x-google-start-bitrate"; @@ -312,7 +312,7 @@ public interface PeerConnectionEvents { /** * Callback fired once peer connection statistics is ready. */ - void onPeerConnectionStatsReady(final StatsReport[] reports); + void onPeerConnectionStatsReady(final RTCStatsReport report); /** * Callback fired once peer connection error happened. @@ -322,7 +322,7 @@ public interface PeerConnectionEvents { /** * Create a PeerConnectionClient with the specified parameters. PeerConnectionClient takes - * ownership of |eglBase|. + * ownership of `eglBase`. */ public PeerConnectionClient(Context appContext, EglBase eglBase, PeerConnectionParameters peerConnectionParameters, PeerConnectionEvents events) { @@ -443,6 +443,10 @@ private void createPeerConnectionFactoryInternal(PeerConnectionFactory.Options o decoderFactory = new SoftwareVideoDecoderFactory(); } + // Disable encryption for loopback calls. 
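  // (Rationale, as a sketch: a lone client answering its own offer cannot
  // complete a DTLS handshake with itself, so loopback now disables
  // encryption via PeerConnectionFactory.Options instead of the removed
  // rtcConfig.enableDtlsSrtp flag that a hunk below deletes.)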
+ if (peerConnectionParameters.loopback) { + options.disableEncryption = true; + } factory = PeerConnectionFactory.builder() .setOptions(options) .setAudioDeviceModule(adm) @@ -601,8 +605,6 @@ private void createPeerConnectionInternal() { rtcConfig.continualGatheringPolicy = PeerConnection.ContinualGatheringPolicy.GATHER_CONTINUALLY; // Use ECDSA encryption. rtcConfig.keyType = PeerConnection.KeyType.ECDSA; - // Enable DTLS for normal calls and disable for loopback calls. - rtcConfig.enableDtlsSrtp = !peerConnectionParameters.loopback; rtcConfig.sdpSemantics = PeerConnection.SdpSemantics.UNIFIED_PLAN; peerConnection = factory.createPeerConnection(rtcConfig, pcObserver); @@ -620,7 +622,7 @@ private void createPeerConnectionInternal() { isInitiator = false; // Set INFO libjingle logging. - // NOTE: this _must_ happen while |factory| is alive! + // NOTE: this _must_ happen while `factory` is alive! Logging.enableLogToDebugOutput(Logging.Severity.LS_INFO); List mediaStreamLabels = Collections.singletonList("ARDAMS"); @@ -747,20 +749,16 @@ public boolean isHDVideo() { return isVideoCallEnabled() && videoWidth * videoHeight >= 1280 * 720; } - @SuppressWarnings("deprecation") // TODO(sakal): getStats is deprecated. private void getStats() { if (peerConnection == null || isError) { return; } - boolean success = peerConnection.getStats(new StatsObserver() { + peerConnection.getStats(new RTCStatsCollectorCallback() { @Override - public void onComplete(final StatsReport[] reports) { - events.onPeerConnectionStatsReady(reports); + public void onStatsDelivered(RTCStatsReport report) { + events.onPeerConnectionStatsReady(report); } - }, null); - if (!success) { - Log.e(TAG, "getStats() returns false!"); - } + }); } public void enableStatsEvents(boolean enable, int periodMs) { @@ -989,7 +987,7 @@ private static String getSdpVideoCodecName(PeerConnectionParameters parameters) case VIDEO_CODEC_VP9: return VIDEO_CODEC_VP9; case VIDEO_CODEC_AV1: - return VIDEO_CODEC_AV1_SDP_CODEC_NAME; + return VIDEO_CODEC_AV1; case VIDEO_CODEC_H264_HIGH: case VIDEO_CODEC_H264_BASELINE: return VIDEO_CODEC_H264; @@ -1115,7 +1113,7 @@ private static String joinString( final List unpreferredPayloadTypes = new ArrayList<>(origLineParts.subList(3, origLineParts.size())); unpreferredPayloadTypes.removeAll(preferredPayloadTypes); - // Reconstruct the line with |preferredPayloadTypes| moved to the beginning of the payload + // Reconstruct the line with `preferredPayloadTypes` moved to the beginning of the payload // types. final List newLineParts = new ArrayList<>(); newLineParts.addAll(header); @@ -1131,7 +1129,7 @@ private static String preferCodec(String sdp, String codec, boolean isAudio) { Log.w(TAG, "No mediaDescription line, so can't prefer " + codec); return sdp; } - // A list with all the payload types with name |codec|. The payload types are integers in the + // A list with all the payload types with name `codec`. The payload types are integers in the // range 96-127, but they are stored as strings here. 
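  // Worked example (sketch): preferring VP9 when the SDP contains
  //   m=video 9 UDP/TLS/RTP/SAVPF 96 98 100
  //   a=rtpmap:98 VP9/90000
  // collects codecPayloadTypes = ["98"] and rewrites the m= line to
  //   m=video 9 UDP/TLS/RTP/SAVPF 98 96 100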
final List codecPayloadTypes = new ArrayList<>(); // a=rtpmap: / [/] @@ -1216,6 +1214,13 @@ public void onIceCandidate(final IceCandidate candidate) { executor.execute(() -> events.onIceCandidate(candidate)); } + @Override + public void onIceCandidateError(final IceCandidateErrorEvent event) { + Log.d(TAG, + "IceCandidateError address: " + event.address + ", port: " + event.port + ", url: " + + event.url + ", errorCode: " + event.errorCode + ", errorText: " + event.errorText); + } + @Override public void onIceCandidatesRemoved(final IceCandidate[] candidates) { executor.execute(() -> events.onIceCandidatesRemoved(candidates)); diff --git a/examples/androidapp/src/org/appspot/apprtc/RecordedAudioToFileController.java b/examples/androidapp/src/org/appspot/apprtc/RecordedAudioToFileController.java index 06943f7eac..9787852feb 100644 --- a/examples/androidapp/src/org/appspot/apprtc/RecordedAudioToFileController.java +++ b/examples/androidapp/src/org/appspot/apprtc/RecordedAudioToFileController.java @@ -12,8 +12,8 @@ import android.media.AudioFormat; import android.os.Environment; -import android.support.annotation.Nullable; import android.util.Log; +import androidx.annotation.Nullable; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; diff --git a/examples/androidapp/src/org/appspot/apprtc/RoomParametersFetcher.java b/examples/androidapp/src/org/appspot/apprtc/RoomParametersFetcher.java index 2ad109306b..6a0f235528 100644 --- a/examples/androidapp/src/org/appspot/apprtc/RoomParametersFetcher.java +++ b/examples/androidapp/src/org/appspot/apprtc/RoomParametersFetcher.java @@ -159,6 +159,7 @@ private void roomHttpResponseParse(String response) { // Requests & returns a TURN ICE Server based on a request URL. Must be run // off the main thread! + @SuppressWarnings("UseNetworkAnnotations") private List requestTurnServers(String url) throws IOException, JSONException { List turnServers = new ArrayList<>(); diff --git a/examples/androidapp/src/org/appspot/apprtc/TCPChannelClient.java b/examples/androidapp/src/org/appspot/apprtc/TCPChannelClient.java index d3094e7df4..d869d7ca66 100644 --- a/examples/androidapp/src/org/appspot/apprtc/TCPChannelClient.java +++ b/examples/androidapp/src/org/appspot/apprtc/TCPChannelClient.java @@ -10,8 +10,8 @@ package org.appspot.apprtc; -import android.support.annotation.Nullable; import android.util.Log; +import androidx.annotation.Nullable; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; diff --git a/examples/androidapp/src/org/appspot/apprtc/UnhandledExceptionHandler.java b/examples/androidapp/src/org/appspot/apprtc/UnhandledExceptionHandler.java index 0b944dc35f..b256400119 100644 --- a/examples/androidapp/src/org/appspot/apprtc/UnhandledExceptionHandler.java +++ b/examples/androidapp/src/org/appspot/apprtc/UnhandledExceptionHandler.java @@ -66,7 +66,7 @@ public void onClick(DialogInterface dialog, int which) { }); } - // Returns the Message attached to the original Cause of |t|. + // Returns the Message attached to the original Cause of `t`. private static String getTopLevelCauseMessage(Throwable t) { Throwable topLevelCause = t; while (topLevelCause.getCause() != null) { @@ -75,8 +75,8 @@ private static String getTopLevelCauseMessage(Throwable t) { return topLevelCause.getMessage(); } - // Returns a human-readable String of the stacktrace in |t|, recursively - // through all Causes that led to |t|. 
+ // Returns a human-readable String of the stacktrace in `t`, recursively + // through all Causes that led to `t`. private static String getRecursiveStackTrace(Throwable t) { StringWriter writer = new StringWriter(); t.printStackTrace(new PrintWriter(writer)); diff --git a/examples/androidapp/src/org/appspot/apprtc/WebSocketChannelClient.java b/examples/androidapp/src/org/appspot/apprtc/WebSocketChannelClient.java index af9a5f31ac..5fa410889a 100644 --- a/examples/androidapp/src/org/appspot/apprtc/WebSocketChannelClient.java +++ b/examples/androidapp/src/org/appspot/apprtc/WebSocketChannelClient.java @@ -11,8 +11,8 @@ package org.appspot.apprtc; import android.os.Handler; -import android.support.annotation.Nullable; import android.util.Log; +import androidx.annotation.Nullable; import de.tavendo.autobahn.WebSocket.WebSocketConnectionObserver; import de.tavendo.autobahn.WebSocketConnection; import de.tavendo.autobahn.WebSocketException; diff --git a/examples/androidapp/src/org/appspot/apprtc/WebSocketRTCClient.java b/examples/androidapp/src/org/appspot/apprtc/WebSocketRTCClient.java index 930f0ee16a..cbfdb21c91 100644 --- a/examples/androidapp/src/org/appspot/apprtc/WebSocketRTCClient.java +++ b/examples/androidapp/src/org/appspot/apprtc/WebSocketRTCClient.java @@ -12,8 +12,8 @@ import android.os.Handler; import android.os.HandlerThread; -import android.support.annotation.Nullable; import android.util.Log; +import androidx.annotation.Nullable; import org.appspot.apprtc.RoomParametersFetcher.RoomParametersFetcherEvents; import org.appspot.apprtc.WebSocketChannelClient.WebSocketChannelEvents; import org.appspot.apprtc.WebSocketChannelClient.WebSocketConnectionState; @@ -368,7 +368,7 @@ public void run() { }); } - // Put a |key|->|value| mapping in |json|. + // Put a `key`->`value` mapping in `json`. 
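  // Typical use when assembling a signaling message (a sketch; field names
  // follow the candidate messages sent elsewhere in this class):
  //   JSONObject json = new JSONObject();
  //   jsonPut(json, "type", "candidate");
  //   jsonPut(json, "candidate", candidate.sdp);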
private static void jsonPut(JSONObject json, String key, Object value) { try { json.put(key, value); diff --git a/examples/androidapp/src/org/appspot/apprtc/util/AsyncHttpURLConnection.java b/examples/androidapp/src/org/appspot/apprtc/util/AsyncHttpURLConnection.java index 3a721b6229..93028ae783 100644 --- a/examples/androidapp/src/org/appspot/apprtc/util/AsyncHttpURLConnection.java +++ b/examples/androidapp/src/org/appspot/apprtc/util/AsyncHttpURLConnection.java @@ -53,6 +53,7 @@ public void send() { new Thread(this ::sendHttpMessage).start(); } + @SuppressWarnings("UseNetworkAnnotations") private void sendHttpMessage() { try { HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection(); diff --git a/examples/androidnativeapi/BUILD.gn b/examples/androidnativeapi/BUILD.gn index 9253c0bcd9..1c840f8248 100644 --- a/examples/androidnativeapi/BUILD.gn +++ b/examples/androidnativeapi/BUILD.gn @@ -21,7 +21,7 @@ if (is_android) { "//sdk/android:surfaceviewrenderer_java", "//sdk/android:video_api_java", "//sdk/android:video_java", - "//third_party/android_deps:com_android_support_support_annotations_java", + "//third_party/androidx:androidx_annotation_annotation_java", ] shared_libraries = [ ":examples_androidnativeapi_jni" ] diff --git a/examples/androidnativeapi/java/org/webrtc/examples/androidnativeapi/MainActivity.java b/examples/androidnativeapi/java/org/webrtc/examples/androidnativeapi/MainActivity.java index 1fb1728701..72fc0a686d 100644 --- a/examples/androidnativeapi/java/org/webrtc/examples/androidnativeapi/MainActivity.java +++ b/examples/androidnativeapi/java/org/webrtc/examples/androidnativeapi/MainActivity.java @@ -13,8 +13,8 @@ import android.app.Activity; import android.content.Context; import android.os.Bundle; -import android.support.annotation.Nullable; import android.widget.Button; +import androidx.annotation.Nullable; import org.webrtc.Camera1Enumerator; import org.webrtc.Camera2Enumerator; import org.webrtc.CameraEnumerator; diff --git a/examples/androidnativeapi/jni/android_call_client.cc b/examples/androidnativeapi/jni/android_call_client.cc index f38de24a3f..2c5e1af108 100644 --- a/examples/androidnativeapi/jni/android_call_client.cc +++ b/examples/androidnativeapi/jni/android_call_client.cc @@ -177,8 +177,10 @@ void AndroidCallClient::CreatePeerConnection() { webrtc::MutexLock lock(&pc_mutex_); webrtc::PeerConnectionInterface::RTCConfiguration config; config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan; - // DTLS SRTP has to be disabled for loopback to work. - config.enable_dtls_srtp = false; + // Encryption has to be disabled for loopback to work. + webrtc::PeerConnectionFactoryInterface::Options options; + options.disable_encryption = true; + pcf_->SetOptions(options); webrtc::PeerConnectionDependencies deps(pc_observer_.get()); pc_ = pcf_->CreatePeerConnectionOrError(config, std::move(deps)).MoveValue(); @@ -206,7 +208,7 @@ void AndroidCallClient::CreatePeerConnection() { void AndroidCallClient::Connect() { webrtc::MutexLock lock(&pc_mutex_); - pc_->CreateOffer(new rtc::RefCountedObject(pc_), + pc_->CreateOffer(rtc::make_ref_counted(pc_), webrtc::PeerConnectionInterface::RTCOfferAnswerOptions()); } @@ -256,14 +258,14 @@ void CreateOfferObserver::OnSuccess(webrtc::SessionDescriptionInterface* desc) { // Ownership of desc was transferred to us, now we transfer it forward. pc_->SetLocalDescription( - new rtc::RefCountedObject(), desc); + rtc::make_ref_counted(), desc); // Generate a fake answer. 
std::unique_ptr answer( webrtc::CreateSessionDescription(webrtc::SdpType::kAnswer, sdp)); pc_->SetRemoteDescription( std::move(answer), - new rtc::RefCountedObject()); + rtc::make_ref_counted()); } void CreateOfferObserver::OnFailure(webrtc::RTCError error) { diff --git a/examples/androidtests/gradle_project_test.py b/examples/androidtests/gradle_project_test.py deleted file mode 100644 index 097232d07f..0000000000 --- a/examples/androidtests/gradle_project_test.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. -# -# Use of this source code is governed by a BSD-style license -# that can be found in the LICENSE file in the root of the source -# tree. An additional intellectual property rights grant can be found -# in the file PATENTS. All contributing project authors may -# be found in the AUTHORS file in the root of the source tree. -""" -This scripts tests creating an Android Studio project using the -generate_gradle.py script and making a debug build using it. - -It expect to be given the webrtc output build directory as the first argument -all other arguments are optional. -""" - -import argparse -import logging -import os -import shutil -import subprocess -import sys -import tempfile - -SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) -SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir)) -GENERATE_GRADLE_SCRIPT = os.path.join( - SRC_DIR, 'build/android/gradle/generate_gradle.py') -GRADLEW_BIN = os.path.join(SCRIPT_DIR, 'third_party/gradle/gradlew') - - -def _RunCommand(argv, cwd=SRC_DIR, **kwargs): - logging.info('Running %r', argv) - subprocess.check_call(argv, cwd=cwd, **kwargs) - - -def _ParseArgs(): - parser = argparse.ArgumentParser( - description='Test generating Android gradle project.') - parser.add_argument('build_dir_android', - help='The path to the build directory for Android.') - parser.add_argument('--project_dir', - help='A temporary directory to put the output.') - - args = parser.parse_args() - return args - - -def main(): - logging.basicConfig(level=logging.INFO) - args = _ParseArgs() - - project_dir = args.project_dir - if not project_dir: - project_dir = tempfile.mkdtemp() - - output_dir = os.path.abspath(args.build_dir_android) - project_dir = os.path.abspath(project_dir) - - try: - env = os.environ.copy() - env['PATH'] = os.pathsep.join([ - os.path.join(SRC_DIR, 'third_party', 'depot_tools'), - env.get('PATH', '') - ]) - _RunCommand([ - GENERATE_GRADLE_SCRIPT, '--output-directory', output_dir, - '--target', '//examples:AppRTCMobile', '--project-dir', - project_dir, '--use-gradle-process-resources', '--split-projects' - ], - env=env) - _RunCommand([GRADLEW_BIN, 'assembleDebug'], project_dir) - finally: - # Do not delete temporary directory if user specified it manually. 
- if not args.project_dir: - shutil.rmtree(project_dir, True) - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/examples/androidtests/src/org/appspot/apprtc/test/PeerConnectionClientTest.java b/examples/androidtests/src/org/appspot/apprtc/test/PeerConnectionClientTest.java index 99e0ff6531..ac3fb23a6e 100644 --- a/examples/androidtests/src/org/appspot/apprtc/test/PeerConnectionClientTest.java +++ b/examples/androidtests/src/org/appspot/apprtc/test/PeerConnectionClientTest.java @@ -15,9 +15,9 @@ import android.os.Build; import android.support.test.InstrumentationRegistry; -import android.support.test.filters.SmallTest; import android.support.test.runner.AndroidJUnit4; import android.util.Log; +import androidx.test.filters.SmallTest; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -39,8 +39,8 @@ import org.webrtc.IceCandidate; import org.webrtc.PeerConnection; import org.webrtc.PeerConnectionFactory; +import org.webrtc.RTCStatsReport; import org.webrtc.SessionDescription; -import org.webrtc.StatsReport; import org.webrtc.VideoCapturer; import org.webrtc.VideoFrame; import org.webrtc.VideoSink; @@ -208,7 +208,7 @@ public void onPeerConnectionError(String description) { } @Override - public void onPeerConnectionStatsReady(StatsReport[] reports) {} + public void onPeerConnectionStatsReady(final RTCStatsReport report) {} // Helper wait functions. private boolean waitForLocalDescription(int timeoutMs) throws InterruptedException { diff --git a/examples/androidvoip/jni/android_voip_client.cc b/examples/androidvoip/jni/android_voip_client.cc index 95d3ed407f..9f17ed0913 100644 --- a/examples/androidvoip/jni/android_voip_client.cc +++ b/examples/androidvoip/jni/android_voip_client.cc @@ -41,7 +41,6 @@ namespace { #define RUN_ON_VOIP_THREAD(method, ...) 
\ if (!voip_thread_->IsCurrent()) { \ voip_thread_->PostTask( \ - RTC_FROM_HERE, \ std::bind(&AndroidVoipClient::method, this, ##__VA_ARGS__)); \ return; \ } \ @@ -61,10 +60,10 @@ rtc::IPAddress QueryDefaultLocalAddress(int family) { RTC_DCHECK(thread->socketserver() != nullptr); RTC_DCHECK(family == AF_INET || family == AF_INET6); - std::unique_ptr socket( - thread->socketserver()->CreateAsyncSocket(family, SOCK_DGRAM)); + std::unique_ptr socket( + thread->socketserver()->CreateSocket(family, SOCK_DGRAM)); if (!socket) { - RTC_LOG_ERR(LERROR) << "Socket creation failed"; + RTC_LOG_ERR(LS_ERROR) << "Socket creation failed"; return rtc::IPAddress(); } @@ -112,7 +111,7 @@ int GetPayloadType(const std::string& codec_name) { return static_cast(PayloadType::kIlbc); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return -1; } @@ -228,7 +227,7 @@ void AndroidVoipClient::SetEncoder( const std::string& chosen_encoder = webrtc::JavaToNativeString(env, j_encoder_string); voip_thread_->PostTask( - RTC_FROM_HERE, [this, chosen_encoder] { SetEncoder(chosen_encoder); }); + [this, chosen_encoder] { SetEncoder(chosen_encoder); }); } void AndroidVoipClient::SetDecoders(const std::vector& decoders) { @@ -258,7 +257,7 @@ void AndroidVoipClient::SetDecoders( webrtc::JavaListToNativeVector( env, j_decoder_strings, &webrtc::JavaToNativeString); voip_thread_->PostTask( - RTC_FROM_HERE, [this, chosen_decoders] { SetDecoders(chosen_decoders); }); + [this, chosen_decoders] { SetDecoders(chosen_decoders); }); } void AndroidVoipClient::SetLocalAddress(const std::string& ip_address, @@ -275,7 +274,7 @@ void AndroidVoipClient::SetLocalAddress( jint j_port_number_int) { const std::string& ip_address = webrtc::JavaToNativeString(env, j_ip_address_string); - voip_thread_->PostTask(RTC_FROM_HERE, [this, ip_address, j_port_number_int] { + voip_thread_->PostTask([this, ip_address, j_port_number_int] { SetLocalAddress(ip_address, j_port_number_int); }); } @@ -294,7 +293,7 @@ void AndroidVoipClient::SetRemoteAddress( jint j_port_number_int) { const std::string& ip_address = webrtc::JavaToNativeString(env, j_ip_address_string); - voip_thread_->PostTask(RTC_FROM_HERE, [this, ip_address, j_port_number_int] { + voip_thread_->PostTask([this, ip_address, j_port_number_int] { SetRemoteAddress(ip_address, j_port_number_int); }); } @@ -308,7 +307,7 @@ void AndroidVoipClient::StartSession(JNIEnv* env) { rtp_socket_.reset(rtc::AsyncUDPSocket::Create(voip_thread_->socketserver(), rtp_local_address_)); if (!rtp_socket_) { - RTC_LOG_ERR(LERROR) << "Socket creation failed"; + RTC_LOG_ERR(LS_ERROR) << "Socket creation failed"; Java_VoipClient_onStartSessionCompleted(env_, j_voip_client_, /*isSuccessful=*/false); return; @@ -319,7 +318,7 @@ void AndroidVoipClient::StartSession(JNIEnv* env) { rtcp_socket_.reset(rtc::AsyncUDPSocket::Create(voip_thread_->socketserver(), rtcp_local_address_)); if (!rtcp_socket_) { - RTC_LOG_ERR(LERROR) << "Socket creation failed"; + RTC_LOG_ERR(LS_ERROR) << "Socket creation failed"; Java_VoipClient_onStartSessionCompleted(env_, j_voip_client_, /*isSuccessful=*/false); return; @@ -431,10 +430,9 @@ bool AndroidVoipClient::SendRtp(const uint8_t* packet, size_t length, const webrtc::PacketOptions& options) { std::vector packet_copy(packet, packet + length); - voip_thread_->PostTask(RTC_FROM_HERE, - [this, packet_copy = std::move(packet_copy)] { - SendRtpPacket(packet_copy); - }); + voip_thread_->PostTask([this, packet_copy = std::move(packet_copy)] { + SendRtpPacket(packet_copy); + }); return true; } @@ -450,10 +448,9 
@@ void AndroidVoipClient::SendRtcpPacket( bool AndroidVoipClient::SendRtcp(const uint8_t* packet, size_t length) { std::vector packet_copy(packet, packet + length); - voip_thread_->PostTask(RTC_FROM_HERE, - [this, packet_copy = std::move(packet_copy)] { - SendRtcpPacket(packet_copy); - }); + voip_thread_->PostTask([this, packet_copy = std::move(packet_copy)] { + SendRtcpPacket(packet_copy); + }); return true; } @@ -476,10 +473,9 @@ void AndroidVoipClient::OnSignalReadRTPPacket(rtc::AsyncPacketSocket* socket, const rtc::SocketAddress& addr, const int64_t& timestamp) { std::vector packet_copy(rtp_packet, rtp_packet + size); - voip_thread_->PostTask(RTC_FROM_HERE, - [this, packet_copy = std::move(packet_copy)] { - ReadRTPPacket(packet_copy); - }); + voip_thread_->PostTask([this, packet_copy = std::move(packet_copy)] { + ReadRTPPacket(packet_copy); + }); } void AndroidVoipClient::ReadRTCPPacket( @@ -502,10 +498,9 @@ void AndroidVoipClient::OnSignalReadRTCPPacket(rtc::AsyncPacketSocket* socket, const rtc::SocketAddress& addr, const int64_t& timestamp) { std::vector packet_copy(rtcp_packet, rtcp_packet + size); - voip_thread_->PostTask(RTC_FROM_HERE, - [this, packet_copy = std::move(packet_copy)] { - ReadRTCPPacket(packet_copy); - }); + voip_thread_->PostTask([this, packet_copy = std::move(packet_copy)] { + ReadRTCPPacket(packet_copy); + }); } static jlong JNI_VoipClient_CreateClient( diff --git a/examples/androidvoip/jni/android_voip_client.h b/examples/androidvoip/jni/android_voip_client.h index bfca7e8b79..8e1edd5ef9 100644 --- a/examples/androidvoip/jni/android_voip_client.h +++ b/examples/androidvoip/jni/android_voip_client.h @@ -147,8 +147,8 @@ class AndroidVoipClient : public webrtc::Transport, // Overloaded methods having native C++ variables as arguments. void SetEncoder(const std::string& encoder); void SetDecoders(const std::vector& decoders); - void SetLocalAddress(const std::string& ip_address, const int port_number); - void SetRemoteAddress(const std::string& ip_address, const int port_number); + void SetLocalAddress(const std::string& ip_address, int port_number); + void SetRemoteAddress(const std::string& ip_address, int port_number); // Methods to send and receive RTP/RTCP packets. Takes in a // copy of a packet as a vector to prolong the lifetime of diff --git a/examples/objc/AppRTCMobile/ARDAppClient.h b/examples/objc/AppRTCMobile/ARDAppClient.h index 8e124ed925..91d2cef1ce 100644 --- a/examples/objc/AppRTCMobile/ARDAppClient.h +++ b/examples/objc/AppRTCMobile/ARDAppClient.h @@ -63,7 +63,7 @@ typedef NS_ENUM(NSInteger, ARDAppClientState) { // class should only be called from the main queue. @interface ARDAppClient : NSObject -// If |shouldGetStats| is true, stats will be reported in 1s intervals through +// If `shouldGetStats` is true, stats will be reported in 1s intervals through // the delegate. @property(nonatomic, assign) BOOL shouldGetStats; @property(nonatomic, readonly) ARDAppClientState state; @@ -75,8 +75,8 @@ typedef NS_ENUM(NSInteger, ARDAppClientState) { - (instancetype)initWithDelegate:(id)delegate; // Establishes a connection with the AppRTC servers for the given room id. -// |settings| is an object containing settings such as video codec for the call. -// If |isLoopback| is true, the call will connect to itself. +// `settings` is an object containing settings such as video codec for the call. +// If `isLoopback` is true, the call will connect to itself. 
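// Call-site sketch (hypothetical caller code):
//   [client connectToRoomWithId:@"testroom"
//                      settings:[[ARDSettingsModel alloc] init]
//                    isLoopback:NO];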
- (void)connectToRoomWithId:(NSString *)roomId settings:(ARDSettingsModel *)settings isLoopback:(BOOL)isLoopback; diff --git a/examples/objc/AppRTCMobile/ARDAppClient.m b/examples/objc/AppRTCMobile/ARDAppClient.m index fa6a960a54..4420972598 100644 --- a/examples/objc/AppRTCMobile/ARDAppClient.m +++ b/examples/objc/AppRTCMobile/ARDAppClient.m @@ -13,6 +13,7 @@ #import "sdk/objc/api/peerconnection/RTCAudioTrack.h" #import "sdk/objc/api/peerconnection/RTCConfiguration.h" #import "sdk/objc/api/peerconnection/RTCFileLogger.h" +#import "sdk/objc/api/peerconnection/RTCIceCandidateErrorEvent.h" #import "sdk/objc/api/peerconnection/RTCIceServer.h" #import "sdk/objc/api/peerconnection/RTCMediaConstraints.h" #import "sdk/objc/api/peerconnection/RTCMediaStream.h" @@ -64,7 +65,7 @@ static int const kKbpsMultiplier = 1000; // We need a proxy to NSTimer because it causes a strong retain cycle. When -// using the proxy, |invalidate| must be called before it properly deallocs. +// using the proxy, `invalidate` must be called before it properly deallocs. @interface ARDTimerProxy : NSObject - (instancetype)initWithInterval:(NSTimeInterval)interval @@ -425,6 +426,17 @@ - (void)peerConnection:(RTC_OBJC_TYPE(RTCPeerConnection) *)peerConnection }); } +- (void)peerConnection:(RTC_OBJC_TYPE(RTCPeerConnection) *)peerConnection + didFailToGatherIceCandidate:(RTC_OBJC_TYPE(RTCIceCandidateErrorEvent) *)event { + RTCLog(@"Failed to gather ICE candidate. address: %@, port: %d, url: %@, errorCode: %d, " + @"errorText: %@", + event.address, + event.port, + event.url, + event.errorCode, + event.errorText); +} + - (void)peerConnection:(RTC_OBJC_TYPE(RTCPeerConnection) *)peerConnection didRemoveIceCandidates:(NSArray *)candidates { dispatch_async(dispatch_get_main_queue(), ^{ diff --git a/examples/objc/AppRTCMobile/ARDSettingsModel.m b/examples/objc/AppRTCMobile/ARDSettingsModel.m index c628f0fde5..9e709b0553 100644 --- a/examples/objc/AppRTCMobile/ARDSettingsModel.m +++ b/examples/objc/AppRTCMobile/ARDSettingsModel.m @@ -77,6 +77,7 @@ - (BOOL)storeVideoResolutionSetting:(NSString *)resolution { - (RTC_OBJC_TYPE(RTCVideoCodecInfo) *)currentVideoCodecSettingFromStore { [self registerStoreDefaults]; NSData *codecData = [[self settingsStore] videoCodec]; +#if defined(WEBRTC_IOS) || __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_13 Class expectedClass = [RTC_OBJC_TYPE(RTCVideoCodecInfo) class]; NSError *error; RTC_OBJC_TYPE(RTCVideoCodecInfo) *videoCodecSetting = @@ -85,6 +86,9 @@ - (BOOL)storeVideoResolutionSetting:(NSString *)resolution { return videoCodecSetting; } return nil; +#else + return [NSKeyedUnarchiver unarchiveObjectWithData:codecData]; +#endif } - (BOOL)storeVideoCodecSetting:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)videoCodec { @@ -92,15 +96,20 @@ - (BOOL)storeVideoCodecSetting:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)videoCodec { return NO; } +#if defined(WEBRTC_IOS) || __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_13 NSError *error; NSData *codecData = [NSKeyedArchiver archivedDataWithRootObject:videoCodec requiringSecureCoding:NO error:&error]; - if (!error) { - [[self settingsStore] setVideoCodec:codecData]; - return YES; + if (error) { + return NO; } - return NO; +#else + NSData *codecData = [NSKeyedArchiver archivedDataWithRootObject:videoCodec]; +#endif + + [[self settingsStore] setVideoCodec:codecData]; + return YES; } - (nullable NSNumber *)currentMaxBitrateSettingFromStore { @@ -179,18 +188,24 @@ - (int)videoResolutionComponentAtIndex:(int)index inString:(NSString *)resolutio } - (void)registerStoreDefaults { +#if 
defined(WEBRTC_IOS) || __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_13 NSError *error; NSData *codecData = [NSKeyedArchiver archivedDataWithRootObject:[self defaultVideoCodecSetting] requiringSecureCoding:NO error:&error]; - if (!error) { - [ARDSettingsStore setDefaultsForVideoResolution:[self defaultVideoResolutionSetting] - videoCodec:codecData - bitrate:nil - audioOnly:NO - createAecDump:NO - useManualAudioConfig:YES]; + if (error) { + return; } +#else + NSData *codecData = [NSKeyedArchiver archivedDataWithRootObject:[self defaultVideoCodecSetting]]; +#endif + + [ARDSettingsStore setDefaultsForVideoResolution:[self defaultVideoResolutionSetting] + videoCodec:codecData + bitrate:nil + audioOnly:NO + createAecDump:NO + useManualAudioConfig:YES]; } @end NS_ASSUME_NONNULL_END diff --git a/examples/objc/AppRTCMobile/ios/ARDMainView.m b/examples/objc/AppRTCMobile/ios/ARDMainView.m index c3bd24a7a0..d9521060eb 100644 --- a/examples/objc/AppRTCMobile/ios/ARDMainView.m +++ b/examples/objc/AppRTCMobile/ios/ARDMainView.m @@ -64,7 +64,7 @@ - (NSString *)roomText { - (BOOL)textFieldShouldReturn:(UITextField *)textField { // There is no other control that can take focus, so manually resign focus - // when return (Join) is pressed to trigger |textFieldDidEndEditing|. + // when return (Join) is pressed to trigger `textFieldDidEndEditing`. [textField resignFirstResponder]; return YES; } diff --git a/examples/objc/AppRTCMobile/third_party/SocketRocket/SRWebSocket.m b/examples/objc/AppRTCMobile/third_party/SocketRocket/SRWebSocket.m index ad7b99a4b2..ab0d1b89bc 100644 --- a/examples/objc/AppRTCMobile/third_party/SocketRocket/SRWebSocket.m +++ b/examples/objc/AppRTCMobile/third_party/SocketRocket/SRWebSocket.m @@ -1478,7 +1478,7 @@ - (void)stream:(NSStream *)aStream handleEvent:(NSStreamEvent)eventCode; case NSStreamEventHasBytesAvailable: { SRFastLog(@"NSStreamEventHasBytesAvailable %@", aStream); - const int bufferSize = 2048; + enum EnumType : int { bufferSize = 2048 }; uint8_t buffer[bufferSize]; while (self->_inputStream.hasBytesAvailable) { diff --git a/examples/objcnativeapi/objc/objc_call_client.mm b/examples/objcnativeapi/objc/objc_call_client.mm index 419203eb62..081b5bc44b 100644 --- a/examples/objcnativeapi/objc/objc_call_client.mm +++ b/examples/objcnativeapi/objc/objc_call_client.mm @@ -141,8 +141,10 @@ webrtc::MutexLock lock(&pc_mutex_); webrtc::PeerConnectionInterface::RTCConfiguration config; config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan; - // DTLS SRTP has to be disabled for loopback to work. - config.enable_dtls_srtp = false; + // Encryption has to be disabled for loopback to work. + webrtc::PeerConnectionFactoryInterface::Options options; + options.disable_encryption = true; + pcf_->SetOptions(options); webrtc::PeerConnectionDependencies pc_dependencies(pc_observer_.get()); pc_ = pcf_->CreatePeerConnectionOrError(config, std::move(pc_dependencies)).MoveValue(); RTC_LOG(LS_INFO) << "PeerConnection created: " << pc_; @@ -166,7 +168,7 @@ void ObjCCallClient::Connect() { webrtc::MutexLock lock(&pc_mutex_); - pc_->CreateOffer(new rtc::RefCountedObject(pc_), + pc_->CreateOffer(rtc::make_ref_counted(pc_), webrtc::PeerConnectionInterface::RTCOfferAnswerOptions()); } @@ -212,13 +214,13 @@ RTC_LOG(LS_INFO) << "Created offer: " << sdp; // Ownership of desc was transferred to us, now we transfer it forward. - pc_->SetLocalDescription(new rtc::RefCountedObject(), desc); + pc_->SetLocalDescription(rtc::make_ref_counted(), desc); // Generate a fake answer. 
std::unique_ptr answer( webrtc::CreateSessionDescription(webrtc::SdpType::kAnswer, sdp)); pc_->SetRemoteDescription(std::move(answer), - new rtc::RefCountedObject()); + rtc::make_ref_counted()); } void CreateOfferObserver::OnFailure(webrtc::RTCError error) { diff --git a/examples/peerconnection/client/conductor.cc b/examples/peerconnection/client/conductor.cc index 005a9d6ddf..93e95b6583 100644 --- a/examples/peerconnection/client/conductor.cc +++ b/examples/peerconnection/client/conductor.cc @@ -59,12 +59,12 @@ class DummySetSessionDescriptionObserver : public webrtc::SetSessionDescriptionObserver { public: static DummySetSessionDescriptionObserver* Create() { - return new rtc::RefCountedObject(); + return rtc::make_ref_counted(); } - virtual void OnSuccess() { RTC_LOG(INFO) << __FUNCTION__; } + virtual void OnSuccess() { RTC_LOG(LS_INFO) << __FUNCTION__; } virtual void OnFailure(webrtc::RTCError error) { - RTC_LOG(INFO) << __FUNCTION__ << " " << ToString(error.type()) << ": " - << error.message(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << ToString(error.type()) << ": " + << error.message(); } }; @@ -85,8 +85,7 @@ class CapturerTrackSource : public webrtc::VideoTrackSource { capturer = absl::WrapUnique( webrtc::test::VcmCapturer::Create(kWidth, kHeight, kFps, i)); if (capturer) { - return new rtc::RefCountedObject( - std::move(capturer)); + return rtc::make_ref_counted(std::move(capturer)); } } @@ -130,9 +129,13 @@ bool Conductor::InitializePeerConnection() { RTC_DCHECK(!peer_connection_factory_); RTC_DCHECK(!peer_connection_); + if (!signaling_thread_.get()) { + signaling_thread_ = rtc::Thread::CreateWithSocketServer(); + signaling_thread_->Start(); + } peer_connection_factory_ = webrtc::CreatePeerConnectionFactory( nullptr /* network_thread */, nullptr /* worker_thread */, - nullptr /* signaling_thread */, nullptr /* default_adm */, + signaling_thread_.get(), nullptr /* default_adm */, webrtc::CreateBuiltinAudioEncoderFactory(), webrtc::CreateBuiltinAudioDecoderFactory(), webrtc::CreateBuiltinVideoEncoderFactory(), @@ -146,7 +149,7 @@ bool Conductor::InitializePeerConnection() { return false; } - if (!CreatePeerConnection(/*dtls=*/true)) { + if (!CreatePeerConnection()) { main_wnd_->MessageBox("Error", "CreatePeerConnection failed", true); DeletePeerConnection(); } @@ -161,23 +164,28 @@ bool Conductor::ReinitializePeerConnectionForLoopback() { std::vector> senders = peer_connection_->GetSenders(); peer_connection_ = nullptr; - if (CreatePeerConnection(/*dtls=*/false)) { + // Loopback is only possible if encryption is disabled. 
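  // (Flow, as a sketch: encryption stays off only while the loopback peer
  // connection and its offer are created; a second SetOptions() call below
  // switches it back on, replacing the removed per-connection
  // config.enable_dtls_srtp toggle.)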
+ webrtc::PeerConnectionFactoryInterface::Options options; + options.disable_encryption = true; + peer_connection_factory_->SetOptions(options); + if (CreatePeerConnection()) { for (const auto& sender : senders) { peer_connection_->AddTrack(sender->track(), sender->stream_ids()); } peer_connection_->CreateOffer( this, webrtc::PeerConnectionInterface::RTCOfferAnswerOptions()); } + options.disable_encryption = false; + peer_connection_factory_->SetOptions(options); return peer_connection_ != nullptr; } -bool Conductor::CreatePeerConnection(bool dtls) { +bool Conductor::CreatePeerConnection() { RTC_DCHECK(peer_connection_factory_); RTC_DCHECK(!peer_connection_); webrtc::PeerConnectionInterface::RTCConfiguration config; config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan; - config.enable_dtls_srtp = dtls; webrtc::PeerConnectionInterface::IceServer server; server.uri = GetPeerConnectionString(); config.servers.push_back(server); @@ -212,23 +220,23 @@ void Conductor::OnAddTrack( rtc::scoped_refptr receiver, const std::vector>& streams) { - RTC_LOG(INFO) << __FUNCTION__ << " " << receiver->id(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); main_wnd_->QueueUIThreadCallback(NEW_TRACK_ADDED, receiver->track().release()); } void Conductor::OnRemoveTrack( rtc::scoped_refptr receiver) { - RTC_LOG(INFO) << __FUNCTION__ << " " << receiver->id(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); main_wnd_->QueueUIThreadCallback(TRACK_REMOVED, receiver->track().release()); } void Conductor::OnIceCandidate(const webrtc::IceCandidateInterface* candidate) { - RTC_LOG(INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index(); // For loopback test. To save some connecting delay. if (loopback_) { if (!peer_connection_->AddIceCandidate(candidate)) { - RTC_LOG(WARNING) << "Failed to apply the received candidate"; + RTC_LOG(LS_WARNING) << "Failed to apply the received candidate"; } return; } @@ -252,12 +260,12 @@ void Conductor::OnIceCandidate(const webrtc::IceCandidateInterface* candidate) { // void Conductor::OnSignedIn() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; main_wnd_->SwitchToPeerList(client_->peers()); } void Conductor::OnDisconnected() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; DeletePeerConnection(); @@ -266,16 +274,16 @@ void Conductor::OnDisconnected() { } void Conductor::OnPeerConnected(int id, const std::string& name) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; // Refresh the list if we're showing it. if (main_wnd_->current_ui() == MainWindow::LIST_PEERS) main_wnd_->SwitchToPeerList(client_->peers()); } void Conductor::OnPeerDisconnected(int id) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; if (id == peer_id_) { - RTC_LOG(INFO) << "Our peer disconnected"; + RTC_LOG(LS_INFO) << "Our peer disconnected"; main_wnd_->QueueUIThreadCallback(PEER_CONNECTION_CLOSED, NULL); } else { // Refresh the list if we're showing it. 
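// (On the logging churn in this file, a sketch: the bare INFO/WARNING/LERROR
// severities are legacy aliases, and the patch moves every call site to the
// LS_-prefixed enumerators, e.g.
//   RTC_LOG(LS_INFO) << "PEER_CONNECTION_CLOSED";)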
@@ -299,7 +307,7 @@ void Conductor::OnMessageFromPeer(int peer_id, const std::string& message) { } } else if (peer_id != peer_id_) { RTC_DCHECK(peer_id_ != -1); - RTC_LOG(WARNING) + RTC_LOG(LS_WARNING) << "Received a message from unknown peer while already in a " "conversation with a different peer."; return; @@ -308,7 +316,7 @@ void Conductor::OnMessageFromPeer(int peer_id, const std::string& message) { Json::Reader reader; Json::Value jmessage; if (!reader.parse(message, jmessage)) { - RTC_LOG(WARNING) << "Received unknown message. " << message; + RTC_LOG(LS_WARNING) << "Received unknown message. " << message; return; } std::string type_str; @@ -337,19 +345,21 @@ void Conductor::OnMessageFromPeer(int peer_id, const std::string& message) { std::string sdp; if (!rtc::GetStringFromJsonObject(jmessage, kSessionDescriptionSdpName, &sdp)) { - RTC_LOG(WARNING) << "Can't parse received session description message."; + RTC_LOG(LS_WARNING) + << "Can't parse received session description message."; return; } webrtc::SdpParseError error; std::unique_ptr session_description = webrtc::CreateSessionDescription(type, sdp, &error); if (!session_description) { - RTC_LOG(WARNING) << "Can't parse received session description message. " - "SdpParseError was: " - << error.description; + RTC_LOG(LS_WARNING) + << "Can't parse received session description message. " + "SdpParseError was: " + << error.description; return; } - RTC_LOG(INFO) << " Received session description :" << message; + RTC_LOG(LS_INFO) << " Received session description :" << message; peer_connection_->SetRemoteDescription( DummySetSessionDescriptionObserver::Create(), session_description.release()); @@ -366,23 +376,23 @@ void Conductor::OnMessageFromPeer(int peer_id, const std::string& message) { !rtc::GetIntFromJsonObject(jmessage, kCandidateSdpMlineIndexName, &sdp_mlineindex) || !rtc::GetStringFromJsonObject(jmessage, kCandidateSdpName, &sdp)) { - RTC_LOG(WARNING) << "Can't parse received message."; + RTC_LOG(LS_WARNING) << "Can't parse received message."; return; } webrtc::SdpParseError error; std::unique_ptr candidate( webrtc::CreateIceCandidate(sdp_mid, sdp_mlineindex, sdp, &error)); if (!candidate.get()) { - RTC_LOG(WARNING) << "Can't parse received candidate message. " - "SdpParseError was: " - << error.description; + RTC_LOG(LS_WARNING) << "Can't parse received candidate message. 
" + "SdpParseError was: " + << error.description; return; } if (!peer_connection_->AddIceCandidate(candidate.get())) { - RTC_LOG(WARNING) << "Failed to apply the received candidate"; + RTC_LOG(LS_WARNING) << "Failed to apply the received candidate"; return; } - RTC_LOG(INFO) << " Received candidate :" << message; + RTC_LOG(LS_INFO) << " Received candidate :" << message; } } @@ -466,7 +476,7 @@ void Conductor::AddTracks() { } void Conductor::DisconnectFromCurrentPeer() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; if (peer_connection_.get()) { client_->SendHangUp(peer_id_); DeletePeerConnection(); @@ -479,7 +489,7 @@ void Conductor::DisconnectFromCurrentPeer() { void Conductor::UIThreadCallback(int msg_id, void* data) { switch (msg_id) { case PEER_CONNECTION_CLOSED: - RTC_LOG(INFO) << "PEER_CONNECTION_CLOSED"; + RTC_LOG(LS_INFO) << "PEER_CONNECTION_CLOSED"; DeletePeerConnection(); if (main_wnd_->IsWindow()) { @@ -494,7 +504,7 @@ void Conductor::UIThreadCallback(int msg_id, void* data) { break; case SEND_MESSAGE_TO_PEER: { - RTC_LOG(INFO) << "SEND_MESSAGE_TO_PEER"; + RTC_LOG(LS_INFO) << "SEND_MESSAGE_TO_PEER"; std::string* msg = reinterpret_cast(data); if (msg) { // For convenience, we always run the message through the queue. @@ -538,7 +548,7 @@ void Conductor::UIThreadCallback(int msg_id, void* data) { } default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; } } @@ -570,7 +580,7 @@ void Conductor::OnSuccess(webrtc::SessionDescriptionInterface* desc) { } void Conductor::OnFailure(webrtc::RTCError error) { - RTC_LOG(LERROR) << ToString(error.type()) << ": " << error.message(); + RTC_LOG(LS_ERROR) << ToString(error.type()) << ": " << error.message(); } void Conductor::SendMessage(const std::string& json_object) { diff --git a/examples/peerconnection/client/conductor.h b/examples/peerconnection/client/conductor.h index 3c06857a05..80617d3cf4 100644 --- a/examples/peerconnection/client/conductor.h +++ b/examples/peerconnection/client/conductor.h @@ -21,6 +21,7 @@ #include "api/peer_connection_interface.h" #include "examples/peerconnection/client/main_wnd.h" #include "examples/peerconnection/client/peer_connection_client.h" +#include "rtc_base/thread.h" namespace webrtc { class VideoCaptureModule; @@ -53,7 +54,7 @@ class Conductor : public webrtc::PeerConnectionObserver, ~Conductor(); bool InitializePeerConnection(); bool ReinitializePeerConnectionForLoopback(); - bool CreatePeerConnection(bool dtls); + bool CreatePeerConnection(); void DeletePeerConnection(); void EnsureStreamingUI(); void AddTracks(); @@ -122,6 +123,7 @@ class Conductor : public webrtc::PeerConnectionObserver, int peer_id_; bool loopback_; + std::unique_ptr signaling_thread_; rtc::scoped_refptr peer_connection_; rtc::scoped_refptr peer_connection_factory_; diff --git a/examples/peerconnection/client/linux/main.cc b/examples/peerconnection/client/linux/main.cc index ccca7b1c7c..47f4f3618e 100644 --- a/examples/peerconnection/client/linux/main.cc +++ b/examples/peerconnection/client/linux/main.cc @@ -101,8 +101,7 @@ int main(int argc, char* argv[]) { rtc::InitializeSSL(); // Must be constructed after we set the socketserver. 
PeerConnectionClient client; - rtc::scoped_refptr conductor( - new rtc::RefCountedObject(&client, &wnd)); + auto conductor = rtc::make_ref_counted(&client, &wnd); socket_server.set_client(&client); socket_server.set_conductor(conductor); diff --git a/examples/peerconnection/client/linux/main_wnd.cc b/examples/peerconnection/client/linux/main_wnd.cc index 7dcfa89d6a..e9b6a514b1 100644 --- a/examples/peerconnection/client/linux/main_wnd.cc +++ b/examples/peerconnection/client/linux/main_wnd.cc @@ -252,7 +252,7 @@ bool GtkMainWnd::Destroy() { } void GtkMainWnd::SwitchToConnectUI() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(IsWindow()); RTC_DCHECK(vbox_ == NULL); @@ -308,7 +308,7 @@ void GtkMainWnd::SwitchToConnectUI() { } void GtkMainWnd::SwitchToPeerList(const Peers& peers) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; if (!peer_list_) { gtk_container_set_border_width(GTK_CONTAINER(window_), 0); @@ -345,7 +345,7 @@ void GtkMainWnd::SwitchToPeerList(const Peers& peers) { } void GtkMainWnd::SwitchToStreamingUI() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(draw_area_ == NULL); @@ -514,7 +514,7 @@ void GtkMainWnd::Draw(GtkWidget* widget, cairo_t* cr) { cairo_fill(cr); cairo_surface_destroy(surface); #else - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); #endif } diff --git a/examples/peerconnection/client/main.cc b/examples/peerconnection/client/main.cc index e209171116..32bc52bda4 100644 --- a/examples/peerconnection/client/main.cc +++ b/examples/peerconnection/client/main.cc @@ -23,7 +23,6 @@ #include "examples/peerconnection/client/main_wnd.h" #include "examples/peerconnection/client/peer_connection_client.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/ssl_adapter.h" #include "rtc_base/string_utils.h" // For ToUtf8 #include "rtc_base/win32_socket_init.h" @@ -40,6 +39,9 @@ class WindowsCommandLineArguments { public: WindowsCommandLineArguments(); + WindowsCommandLineArguments(const WindowsCommandLineArguments&) = delete; + WindowsCommandLineArguments& operator=(WindowsCommandLineArguments&) = delete; + int argc() { return argv_.size(); } char** argv() { return argv_.data(); } @@ -48,9 +50,6 @@ class WindowsCommandLineArguments { std::vector args_; // Pointers, to get layout compatible with char** argv. std::vector argv_; - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(WindowsCommandLineArguments); }; WindowsCommandLineArguments::WindowsCommandLineArguments() { @@ -101,14 +100,13 @@ int PASCAL wWinMain(HINSTANCE instance, MainWnd wnd(server.c_str(), absl::GetFlag(FLAGS_port), absl::GetFlag(FLAGS_autoconnect), absl::GetFlag(FLAGS_autocall)); if (!wnd.Create()) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return -1; } rtc::InitializeSSL(); PeerConnectionClient client; - rtc::scoped_refptr conductor( - new rtc::RefCountedObject(&client, &wnd)); + auto conductor = rtc::make_ref_counted(&client, &wnd); // Main loop. MSG msg; diff --git a/examples/peerconnection/client/peer_connection_client.cc b/examples/peerconnection/client/peer_connection_client.cc index 9bf2a0fecc..c0de4ffb5d 100644 --- a/examples/peerconnection/client/peer_connection_client.cc +++ b/examples/peerconnection/client/peer_connection_client.cc @@ -15,10 +15,6 @@ #include "rtc_base/logging.h" #include "rtc_base/net_helpers.h" -#ifdef WIN32 -#include "rtc_base/win32_socket_server.h" -#endif - namespace { // This is our magical hangup signal. 
@@ -26,18 +22,10 @@ const char kByeMessage[] = "BYE"; // Delay between server connection retries, in milliseconds const int kReconnectDelay = 2000; -rtc::AsyncSocket* CreateClientSocket(int family) { -#ifdef WIN32 - rtc::Win32Socket* sock = new rtc::Win32Socket(); - sock->CreateT(family, SOCK_STREAM); - return sock; -#elif defined(WEBRTC_POSIX) +rtc::Socket* CreateClientSocket(int family) { rtc::Thread* thread = rtc::Thread::Current(); RTC_DCHECK(thread != NULL); - return thread->socketserver()->CreateAsyncSocket(family, SOCK_STREAM); -#else -#error Platform not supported. -#endif + return thread->socketserver()->CreateSocket(family, SOCK_STREAM); } } // namespace @@ -89,7 +77,7 @@ void PeerConnectionClient::Connect(const std::string& server, RTC_DCHECK(!client_name.empty()); if (state_ != NOT_CONNECTED) { - RTC_LOG(WARNING) + RTC_LOG(LS_WARNING) << "The client must not be connected before you can call Connect()"; callback_->OnServerConnectionFailure(); return; @@ -227,14 +215,14 @@ bool PeerConnectionClient::ConnectControlSocket() { return true; } -void PeerConnectionClient::OnConnect(rtc::AsyncSocket* socket) { +void PeerConnectionClient::OnConnect(rtc::Socket* socket) { RTC_DCHECK(!onconnect_data_.empty()); size_t sent = socket->Send(onconnect_data_.c_str(), onconnect_data_.length()); RTC_DCHECK(sent == onconnect_data_.length()); onconnect_data_.clear(); } -void PeerConnectionClient::OnHangingGetConnect(rtc::AsyncSocket* socket) { +void PeerConnectionClient::OnHangingGetConnect(rtc::Socket* socket) { char buffer[1024]; snprintf(buffer, sizeof(buffer), "GET /wait?peer_id=%i HTTP/1.0\r\n\r\n", my_id_); @@ -283,7 +271,7 @@ bool PeerConnectionClient::GetHeaderValue(const std::string& data, return false; } -bool PeerConnectionClient::ReadIntoBuffer(rtc::AsyncSocket* socket, +bool PeerConnectionClient::ReadIntoBuffer(rtc::Socket* socket, std::string* data, size_t* content_length) { char buffer[0xffff]; @@ -297,7 +285,7 @@ bool PeerConnectionClient::ReadIntoBuffer(rtc::AsyncSocket* socket, bool ret = false; size_t i = data->find("\r\n\r\n"); if (i != std::string::npos) { - RTC_LOG(INFO) << "Headers received"; + RTC_LOG(LS_INFO) << "Headers received"; if (GetHeaderValue(*data, i, "\r\nContent-Length: ", content_length)) { size_t total_response_size = (i + 4) + *content_length; if (data->length() >= total_response_size) { @@ -321,7 +309,7 @@ bool PeerConnectionClient::ReadIntoBuffer(rtc::AsyncSocket* socket, return ret; } -void PeerConnectionClient::OnRead(rtc::AsyncSocket* socket) { +void PeerConnectionClient::OnRead(rtc::Socket* socket) { size_t content_length = 0; if (ReadIntoBuffer(socket, &control_data_, &content_length)) { size_t peer_id = 0, eoh = 0; @@ -373,8 +361,8 @@ void PeerConnectionClient::OnRead(rtc::AsyncSocket* socket) { } } -void PeerConnectionClient::OnHangingGetRead(rtc::AsyncSocket* socket) { - RTC_LOG(INFO) << __FUNCTION__; +void PeerConnectionClient::OnHangingGetRead(rtc::Socket* socket) { + RTC_LOG(LS_INFO) << __FUNCTION__; size_t content_length = 0; if (ReadIntoBuffer(socket, ¬ification_data_, &content_length)) { size_t peer_id = 0, eoh = 0; @@ -471,8 +459,8 @@ bool PeerConnectionClient::ParseServerResponse(const std::string& response, return true; } -void PeerConnectionClient::OnClose(rtc::AsyncSocket* socket, int err) { - RTC_LOG(INFO) << __FUNCTION__; +void PeerConnectionClient::OnClose(rtc::Socket* socket, int err) { + RTC_LOG(LS_INFO) << __FUNCTION__; socket->Close(); @@ -491,7 +479,7 @@ void PeerConnectionClient::OnClose(rtc::AsyncSocket* socket, int err) { } } 
else { if (socket == control_socket_.get()) { - RTC_LOG(WARNING) << "Connection refused; retrying in 2 seconds"; + RTC_LOG(LS_WARNING) << "Connection refused; retrying in 2 seconds"; rtc::Thread::Current()->PostDelayed(RTC_FROM_HERE, kReconnectDelay, this, 0); } else { diff --git a/examples/peerconnection/client/peer_connection_client.h b/examples/peerconnection/client/peer_connection_client.h index d7ae91343d..00d2192681 100644 --- a/examples/peerconnection/client/peer_connection_client.h +++ b/examples/peerconnection/client/peer_connection_client.h @@ -73,8 +73,8 @@ class PeerConnectionClient : public sigslot::has_slots<>, void Close(); void InitSocketSignals(); bool ConnectControlSocket(); - void OnConnect(rtc::AsyncSocket* socket); - void OnHangingGetConnect(rtc::AsyncSocket* socket); + void OnConnect(rtc::Socket* socket); + void OnHangingGetConnect(rtc::Socket* socket); void OnMessageFromPeer(int peer_id, const std::string& message); // Quick and dirty support for parsing HTTP header values. @@ -89,13 +89,13 @@ class PeerConnectionClient : public sigslot::has_slots<>, std::string* value); // Returns true if the whole response has been read. - bool ReadIntoBuffer(rtc::AsyncSocket* socket, + bool ReadIntoBuffer(rtc::Socket* socket, std::string* data, size_t* content_length); - void OnRead(rtc::AsyncSocket* socket); + void OnRead(rtc::Socket* socket); - void OnHangingGetRead(rtc::AsyncSocket* socket); + void OnHangingGetRead(rtc::Socket* socket); // Parses a single line entry in the form ",," bool ParseEntry(const std::string& entry, @@ -110,15 +110,15 @@ class PeerConnectionClient : public sigslot::has_slots<>, size_t* peer_id, size_t* eoh); - void OnClose(rtc::AsyncSocket* socket, int err); + void OnClose(rtc::Socket* socket, int err); void OnResolveResult(rtc::AsyncResolverInterface* resolver); PeerConnectionClientObserver* callback_; rtc::SocketAddress server_address_; rtc::AsyncResolver* resolver_; - std::unique_ptr control_socket_; - std::unique_ptr hanging_get_; + std::unique_ptr control_socket_; + std::unique_ptr hanging_get_; std::string onconnect_data_; std::string control_data_; std::string notification_data_; diff --git a/examples/peerconnection/server/data_socket.cc b/examples/peerconnection/server/data_socket.cc index 2d595a0e86..c57d992658 100644 --- a/examples/peerconnection/server/data_socket.cc +++ b/examples/peerconnection/server/data_socket.cc @@ -269,8 +269,12 @@ bool DataSocket::ParseContentLengthAndType(const char* headers, size_t length) { bool ListeningSocket::Listen(unsigned short port) { RTC_DCHECK(valid()); int enabled = 1; - setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR, - reinterpret_cast(&enabled), sizeof(enabled)); + if (setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR, + reinterpret_cast(&enabled), + sizeof(enabled)) != 0) { + printf("setsockopt failed\n"); + return false; + } struct sockaddr_in addr = {0}; addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl(INADDR_ANY); diff --git a/examples/peerconnection/server/data_socket.h b/examples/peerconnection/server/data_socket.h index 4429bc5a00..57ad5b9aee 100644 --- a/examples/peerconnection/server/data_socket.h +++ b/examples/peerconnection/server/data_socket.h @@ -37,6 +37,8 @@ class SocketBase { public: SocketBase() : socket_(INVALID_SOCKET) {} explicit SocketBase(NativeSocket socket) : socket_(socket) {} + SocketBase(SocketBase& other) = delete; + SocketBase& operator=(const SocketBase& other) = delete; ~SocketBase() { Close(); } NativeSocket socket() const { return socket_; } @@ -97,14 +99,14 @@ class 
DataSocket : public SocketBase {
  // Send a raw buffer of bytes.
  bool Send(const std::string& data) const;

-  // Send an HTTP response. The |status| should start with a valid HTTP
+  // Send an HTTP response. The `status` should start with a valid HTTP
  // response code, followed by a string. E.g. "200 OK".
-  // If |connection_close| is set to true, an extra "Connection: close" HTTP
-  // header will be included. |content_type| is the mime content type, not
+  // If `connection_close` is set to true, an extra "Connection: close" HTTP
+  // header will be included. `content_type` is the mime content type, not
  // including the "Content-Type: " string.
-  // |extra_headers| should be either empty or a list of headers where each
+  // `extra_headers` should be either empty or a list of headers where each
  // header terminates with "\r\n".
-  // |data| is the body of the message. It's length will be specified via
+  // `data` is the body of the message. Its length will be specified via
  // a "Content-Length" header.
  bool Send(const std::string& status,
            bool connection_close,
diff --git a/examples/peerconnection/server/peer_channel.h b/examples/peerconnection/server/peer_channel.h
index c3bb1ad10b..c3624908ac 100644
--- a/examples/peerconnection/server/peer_channel.h
+++ b/examples/peerconnection/server/peer_channel.h
@@ -80,7 +80,7 @@ class PeerChannel {
  // request. Otherwise the request is not peerconnection related.
  static bool IsPeerConnection(const DataSocket* ds);

-  // Finds a connected peer that's associated with the |ds| socket.
+  // Finds a connected peer that's associated with the `ds` socket.
  ChannelMember* Lookup(DataSocket* ds) const;

  // Checks if the request has a "peer_id" parameter and if so, looks up the
diff --git a/examples/stunprober/main.cc b/examples/stunprober/main.cc
index abd35e362b..d0ed92cc34 100644
--- a/examples/stunprober/main.cc
+++ b/examples/stunprober/main.cc
@@ -21,6 +21,7 @@
 #include "rtc_base/helpers.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/network.h"
+#include "rtc_base/physical_socket_server.h"
 #include "rtc_base/socket_address.h"
 #include "rtc_base/ssl_adapter.h"
 #include "rtc_base/thread.h"
@@ -122,23 +123,23 @@ int main(int argc, char* argv[]) {
  rtc::InitializeSSL();
  rtc::InitRandom(rtc::Time32());

-  rtc::Thread* thread = rtc::ThreadManager::Instance()->WrapCurrentThread();
-  std::unique_ptr<rtc::BasicPacketSocketFactory> socket_factory(
-      new rtc::BasicPacketSocketFactory());
+  rtc::PhysicalSocketServer socket_server;
+  rtc::AutoSocketServerThread thread(&socket_server);
+  auto socket_factory =
+      std::make_unique<rtc::BasicPacketSocketFactory>(&socket_server);
  std::unique_ptr<rtc::BasicNetworkManager> network_manager(
-      new rtc::BasicNetworkManager());
+      new rtc::BasicNetworkManager(&socket_server));
  rtc::NetworkManager::NetworkList networks;
  network_manager->GetNetworks(&networks);
-  StunProber* prober =
-      new StunProber(socket_factory.get(), rtc::Thread::Current(), networks);
-  auto finish_callback = [thread](StunProber* prober, int result) {
-    StopTrial(thread, prober, result);
+  auto prober = std::make_unique<StunProber>(socket_factory.get(),
+                                             rtc::Thread::Current(), networks);
+  auto finish_callback = [&thread](StunProber* prober, int result) {
+    StopTrial(&thread, prober, result);
  };
  prober->Start(server_addresses, absl::GetFlag(FLAGS_shared_socket),
                absl::GetFlag(FLAGS_interval),
                absl::GetFlag(FLAGS_pings_per_ip),
                absl::GetFlag(FLAGS_timeout),
                AsyncCallback(finish_callback));
-  thread->Run();
-  delete prober;
+  thread.Run();
  return 0;
}
diff --git a/examples/turnserver/turnserver_main.cc b/examples/turnserver/turnserver_main.cc
index
5e2a747f20..055d9baaee 100644 --- a/examples/turnserver/turnserver_main.cc +++ b/examples/turnserver/turnserver_main.cc @@ -20,8 +20,8 @@ #include "p2p/base/turn_server.h" #include "rtc_base/async_udp_socket.h" #include "rtc_base/ip_address.h" +#include "rtc_base/physical_socket_server.h" #include "rtc_base/socket_address.h" -#include "rtc_base/socket_server.h" #include "rtc_base/thread.h" namespace { @@ -69,16 +69,17 @@ int main(int argc, char* argv[]) { return 1; } - rtc::Thread* main = rtc::Thread::Current(); + rtc::PhysicalSocketServer socket_server; + rtc::AutoSocketServerThread main(&socket_server); rtc::AsyncUDPSocket* int_socket = - rtc::AsyncUDPSocket::Create(main->socketserver(), int_addr); + rtc::AsyncUDPSocket::Create(&socket_server, int_addr); if (!int_socket) { std::cerr << "Failed to create a UDP socket bound at" << int_addr.ToString() << std::endl; return 1; } - cricket::TurnServer server(main); + cricket::TurnServer server(&main); std::fstream auth_file(argv[4], std::fstream::in); TurnFileAuth auth(auth_file.is_open() @@ -88,11 +89,12 @@ int main(int argc, char* argv[]) { server.set_software(kSoftware); server.set_auth_hook(&auth); server.AddInternalSocket(int_socket, cricket::PROTO_UDP); - server.SetExternalSocketFactory(new rtc::BasicPacketSocketFactory(), - rtc::SocketAddress(ext_addr, 0)); + server.SetExternalSocketFactory( + new rtc::BasicPacketSocketFactory(&socket_server), + rtc::SocketAddress(ext_addr, 0)); std::cout << "Listening internally at " << int_addr.ToString() << std::endl; - main->Run(); + main.Run(); return 0; } diff --git a/examples/unityplugin/java/src/org/webrtc/UnityUtility.java b/examples/unityplugin/java/src/org/webrtc/UnityUtility.java index a8f53da34d..bd8bbfa449 100644 --- a/examples/unityplugin/java/src/org/webrtc/UnityUtility.java +++ b/examples/unityplugin/java/src/org/webrtc/UnityUtility.java @@ -11,7 +11,7 @@ package org.webrtc; import android.content.Context; -import android.support.annotation.Nullable; +import androidx.annotation.Nullable; public class UnityUtility { private static final String VIDEO_CAPTURER_THREAD_NAME = "VideoCapturerThread"; diff --git a/examples/unityplugin/simple_peer_connection.cc b/examples/unityplugin/simple_peer_connection.cc index c7e5185bdc..16c580e767 100644 --- a/examples/unityplugin/simple_peer_connection.cc +++ b/examples/unityplugin/simple_peer_connection.cc @@ -61,7 +61,7 @@ class CapturerTrackSource : public webrtc::VideoTrackSource { if (!capturer) { return nullptr; } - return new rtc::RefCountedObject(std::move(capturer)); + return rtc::make_ref_counted(std::move(capturer)); } protected: @@ -99,12 +99,12 @@ class DummySetSessionDescriptionObserver : public webrtc::SetSessionDescriptionObserver { public: static DummySetSessionDescriptionObserver* Create() { - return new rtc::RefCountedObject(); + return rtc::make_ref_counted(); } - virtual void OnSuccess() { RTC_LOG(INFO) << __FUNCTION__; } + virtual void OnSuccess() { RTC_LOG(LS_INFO) << __FUNCTION__; } virtual void OnFailure(webrtc::RTCError error) { - RTC_LOG(INFO) << __FUNCTION__ << " " << ToString(error.type()) << ": " - << error.message(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << ToString(error.type()) << ": " + << error.message(); } protected: @@ -190,7 +190,6 @@ bool SimplePeerConnection::CreatePeerConnection(const char** turn_urls, webrtc::PeerConnectionInterface::IceServer stun_server; stun_server.uri = GetPeerConnectionString(); config_.servers.push_back(stun_server); - config_.enable_dtls_srtp = false; auto result = 
g_peer_connection_factory->CreatePeerConnectionOrError( config_, webrtc::PeerConnectionDependencies(this)); @@ -270,7 +269,7 @@ void SimplePeerConnection::OnSuccess( } void SimplePeerConnection::OnFailure(webrtc::RTCError error) { - RTC_LOG(LERROR) << ToString(error.type()) << ": " << error.message(); + RTC_LOG(LS_ERROR) << ToString(error.type()) << ": " << error.message(); // TODO(hta): include error.type in the message if (OnFailureMessage) @@ -279,7 +278,7 @@ void SimplePeerConnection::OnFailure(webrtc::RTCError error) { void SimplePeerConnection::OnIceCandidate( const webrtc::IceCandidateInterface* candidate) { - RTC_LOG(INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index(); std::string sdp; if (!candidate->ToString(&sdp)) { @@ -344,12 +343,12 @@ bool SimplePeerConnection::SetRemoteDescription(const char* type, webrtc::SessionDescriptionInterface* session_description( webrtc::CreateSessionDescription(desc_type, remote_desc, &error)); if (!session_description) { - RTC_LOG(WARNING) << "Can't parse received session description message. " - "SdpParseError was: " - << error.description; + RTC_LOG(LS_WARNING) << "Can't parse received session description message. " + "SdpParseError was: " + << error.description; return false; } - RTC_LOG(INFO) << " Received session description :" << remote_desc; + RTC_LOG(LS_INFO) << " Received session description :" << remote_desc; peer_connection_->SetRemoteDescription( DummySetSessionDescriptionObserver::Create(), session_description); @@ -366,16 +365,16 @@ bool SimplePeerConnection::AddIceCandidate(const char* candidate, std::unique_ptr ice_candidate( webrtc::CreateIceCandidate(sdp_mid, sdp_mlineindex, candidate, &error)); if (!ice_candidate.get()) { - RTC_LOG(WARNING) << "Can't parse received candidate message. " - "SdpParseError was: " - << error.description; + RTC_LOG(LS_WARNING) << "Can't parse received candidate message. 
" + "SdpParseError was: " + << error.description; return false; } if (!peer_connection_->AddIceCandidate(ice_candidate.get())) { - RTC_LOG(WARNING) << "Failed to apply the received candidate"; + RTC_LOG(LS_WARNING) << "Failed to apply the received candidate"; return false; } - RTC_LOG(INFO) << " Received candidate :" << candidate; + RTC_LOG(LS_INFO) << " Received candidate :" << candidate; return true; } @@ -410,7 +409,7 @@ void SimplePeerConnection::SetAudioControl() { void SimplePeerConnection::OnAddStream( rtc::scoped_refptr stream) { - RTC_LOG(INFO) << __FUNCTION__ << " " << stream->id(); + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << stream->id(); remote_stream_ = stream; if (remote_video_observer_ && !remote_stream_->GetVideoTracks().empty()) { remote_stream_->GetVideoTracks()[0]->AddOrUpdateSink( @@ -447,10 +446,9 @@ void SimplePeerConnection::AddStreams(bool audio_only) { RTC_DCHECK(texture_helper != nullptr) << "Cannot get the Surface Texture Helper."; - rtc::scoped_refptr source( - new rtc::RefCountedObject( - g_signaling_thread.get(), env, /* is_screencast= */ false, - /* align_timestamps= */ true)); + auto source = rtc::make_ref_counted( + g_signaling_thread.get(), env, /*is_screencast=*/false, + /*align_timestamps=*/true); // link with VideoCapturer (Camera); jmethodID link_camera_method = webrtc::GetStaticMethodID( diff --git a/examples/unityplugin/simple_peer_connection.h b/examples/unityplugin/simple_peer_connection.h index b99bde930e..d5cebc9940 100644 --- a/examples/unityplugin/simple_peer_connection.h +++ b/examples/unityplugin/simple_peer_connection.h @@ -31,7 +31,7 @@ class SimplePeerConnection : public webrtc::PeerConnectionObserver, ~SimplePeerConnection() {} bool InitializePeerConnection(const char** turn_urls, - const int no_of_urls, + int no_of_urls, const char* username, const char* credential, bool is_receiver); @@ -56,13 +56,13 @@ class SimplePeerConnection : public webrtc::PeerConnectionObserver, ICECANDIDATEREADYTOSEND_CALLBACK callback); bool SetRemoteDescription(const char* type, const char* sdp); bool AddIceCandidate(const char* sdp, - const int sdp_mlineindex, + int sdp_mlineindex, const char* sdp_mid); protected: // create a peerconneciton and add the turn servers info to the configuration. 
  bool CreatePeerConnection(const char** turn_urls,
-                            const int no_of_urls,
+                            int no_of_urls,
                            const char* username,
                            const char* credential);
  void CloseDataChannel();
diff --git a/examples/unityplugin/unity_plugin_apis.cc b/examples/unityplugin/unity_plugin_apis.cc
index 34c28d926a..672330faec 100644
--- a/examples/unityplugin/unity_plugin_apis.cc
+++ b/examples/unityplugin/unity_plugin_apis.cc
@@ -27,7 +27,7 @@ int CreatePeerConnection(const char** turn_urls,
                         const char* credential,
                         bool mandatory_receive_video) {
  g_peer_connection_map[g_peer_connection_id] =
-      new rtc::RefCountedObject<SimplePeerConnection>();
+      rtc::make_ref_counted<SimplePeerConnection>();

  if (!g_peer_connection_map[g_peer_connection_id]->InitializePeerConnection(
          turn_urls, no_of_urls, username, credential, mandatory_receive_video))
diff --git a/examples/unityplugin/unity_plugin_apis.h b/examples/unityplugin/unity_plugin_apis.h
index b32f9e2caf..8b8fe0fe80 100644
--- a/examples/unityplugin/unity_plugin_apis.h
+++ b/examples/unityplugin/unity_plugin_apis.h
@@ -31,7 +31,7 @@ typedef void (*DATAFROMEDATECHANNELREADY_CALLBACK)(const char* msg);
 typedef void (*FAILURE_CALLBACK)(const char* msg);
 typedef void (*LOCALSDPREADYTOSEND_CALLBACK)(const char* type, const char* sdp);
 typedef void (*ICECANDIDATEREADYTOSEND_CALLBACK)(const char* candidate,
-                                                 const int sdp_mline_index,
+                                                 int sdp_mline_index,
                                                  const char* sdp_mid);
 typedef void (*AUDIOBUSREADY_CALLBACK)(const void* audio_data,
                                        int bits_per_sample,
@@ -47,7 +47,7 @@ typedef void (*AUDIOBUSREADY_CALLBACK)(const void* audio_data,
 extern "C" {
 // Create a peerconnection and return a unique peer connection id.
 WEBRTC_PLUGIN_API int CreatePeerConnection(const char** turn_urls,
-                                           const int no_of_urls,
+                                           int no_of_urls,
                                            const char* username,
                                            const char* credential,
                                            bool mandatory_receive_video);
@@ -75,9 +75,9 @@ WEBRTC_PLUGIN_API bool SetRemoteDescription(int peer_connection_id,
                                             const char* type,
                                             const char* sdp);
 // Add ice candidate.
-WEBRTC_PLUGIN_API bool AddIceCandidate(const int peer_connection_id,
+WEBRTC_PLUGIN_API bool AddIceCandidate(int peer_connection_id,
                                        const char* candidate,
-                                       const int sdp_mlineindex,
+                                       int sdp_mlineindex,
                                        const char* sdp_mid);

// Register callback functions.
diff --git a/g3doc/abseil-in-webrtc.md b/g3doc/abseil-in-webrtc.md
index 692ebe2b0b..6f0b4812e8 100644
--- a/g3doc/abseil-in-webrtc.md
+++ b/g3doc/abseil-in-webrtc.md
@@ -26,6 +26,7 @@ will generate a shared library.
 ## **Allowed**

 * `absl::bind_front`
+* `absl::Cleanup`
 * `absl::InlinedVector`
 * `absl::WrapUnique`
 * `absl::optional` and related stuff from `absl/types/optional.h`.
@@ -41,6 +42,7 @@
 * `absl/base/const_init.h` for mutex initialization.
 * The macros in `absl/base/attributes.h`, `absl/base/config.h` and
   `absl/base/macros.h`.
+* `absl/numeric/bits.h`

 ## **Disallowed**
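`absl::Cleanup`, newly added to the allowed list above, runs a user-supplied callback when it goes out of scope, which makes cleanup on early returns hard to get wrong. A minimal sketch of typical usage (the function and its contents are hypothetical, not part of this patch):

#include <cstdio>

#include "absl/cleanup/cleanup.h"

bool WriteMarkerFile(const char* path) {
  FILE* f = fopen(path, "w");
  if (f == nullptr)
    return false;
  // fclose(f) runs on every path out of this scope, including early returns.
  absl::Cleanup closer = [f] { fclose(f); };
  return fputs("ok\n", f) >= 0;
}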
diff --git a/g3doc/become_a_committer.md b/g3doc/become_a_committer.md
new file mode 100644
index 0000000000..8045c067f3
--- /dev/null
+++ b/g3doc/become_a_committer.md
@@ -0,0 +1,84 @@
+# How to get tryjob access or become a WebRTC committer
+
+## Overview
+
+There are two levels of WebRTC contributor access:
+
+1. Tryjob access - lets a contributor run tests for their changes using the
+   WebRTC infrastructure.
+2. WebRTC committer rights - allow submitting changes to the WebRTC code base.
+   This includes tryjob access.
+
+## Getting tryjob access
+
+To get tryjob access, an applicant has to contribute around 10-20 CLs to the
+WebRTC code base. After that, they should file a bug using the
+[Get tryjob access template][7], specifying the email that was used for the
+contributions (and to which access will be granted) and listing the
+contributed CLs.
+
+Access is granted when the ticket is resolved by one of the project members.
+In case of rejection, an explanation will be provided.
+
+## WebRTC committer duties
+
+WebRTC committers are responsible for keeping the WebRTC codebase in good
+shape, including, but not limited to, the following aspects:
+
+* Code complexity and correctness
+* C++ best practices
+* Code formatting
+* Test coverage
+* Class/function level and conceptual documentation
+
+Whenever a committer sets the `Code Review +1` label on a CL, they attest that
+the CL meets the WebRTC style guides and the project's expectations for
+language mastery, testability and documentation. Being a committer means being
+responsible for the health and code quality of the WebRTC codebase.
+
+## Becoming a WebRTC committer
+
+To write code in WebRTC you don't need to be a committer (also see the
+[FAQ][1]), but to submit code to WebRTC you do. If you don't plan to work on
+the WebRTC codebase regularly, you can ask other committers to submit your
+patches through code review; if you are going to work in the WebRTC codebase
+regularly, it's recommended to apply for WebRTC committer rights:
+
+1. If you are going to write C++, make yourself familiar with the C++ style
+   guides:
+
+   * [Google style guide][5]
+   * [Chromium style guide][2]
+   * [WebRTC style guide][3]
+
+2. Create a ticket in Monorail to obtain WebRTC committer rights, using
+   [this template][6].
+
+3. Pick a mentor among the WebRTC committers, who will review your CLs. For
+   C++ authors, the mentor will also assess C++ readability skills. It's
+   recommended to ask someone who is familiar with the code base you will be
+   working on (you can check OWNERS files to find such a person). Otherwise,
+   you can reach out to the committers mailing list.
+
+4. Send CLs to your mentor for review and attach them to the created ticket.
+
+5. When your mentor decides that you are ready (for C++ authors, that your
+   C++ readability skills are good enough), they will send a proposal for
+   granting you WebRTC committer rights to the reviewing committee's mailing
+   list. If the proposal is approved, committer rights will be granted.
+   Committee members have up to 5 business days to answer. In case of
+   rejection, detailed feedback on what should be improved will be provided.
+
+6. Like any contributor, you must also sign and return the
+   [Contributor License Agreement][4].
+
+[1]: https://webrtc.googlesource.com/src/+/refs/heads/main/docs/faq.md#to-be-a-contributor_do-i-need-to-sign-any-agreements
+[2]: https://chromium.googlesource.com/chromium/src/+/refs/heads/main/styleguide/c++/c++.md
+[3]: https://webrtc.googlesource.com/src/+/refs/heads/main/g3doc/style-guide.md
+[4]: https://developers.google.com/open-source/cla/individual?hl=en
+[5]: https://google.github.io/styleguide/cppguide.html
+[6]: https://bugs.chromium.org/p/webrtc/issues/entry?template=Become+WebRTC+committer
+[7]: https://bugs.chromium.org/p/webrtc/issues/entry?template=Get+tryjob+access
diff --git a/g3doc/how_to_write_documentation.md b/g3doc/how_to_write_documentation.md
index 6fbca116a5..a29d686e66 100644
--- a/g3doc/how_to_write_documentation.md
+++ b/g3doc/how_to_write_documentation.md
@@ -67,6 +67,5 @@ The documentation is written in GitHub Markdown
 Documentation of specific classes and function APIs and their usage, including
 their purpose, is embedded in the .h files defining that API. See
-[C++ style guide](https://chromium.googlesource.com/chromium/src/+/master/styleguide/c++/c++.md)
+[C++ style guide](https://chromium.googlesource.com/chromium/src/+/main/styleguide/c++/c++.md)
 for pointers on how to write API documentation in .h files.
-
diff --git a/g3doc/implementation_basics.md b/g3doc/implementation_basics.md
index 933941a0d1..fd906d03c3 100644
--- a/g3doc/implementation_basics.md
+++ b/g3doc/implementation_basics.md
@@ -82,7 +82,15 @@ in the (slow) process of being removed from the codebase.
 * RecursiveCriticalSection. Try to use [webrtc::Mutex][6] instead, and don't
   recurse.
-
+## Enum-To-String functions
+
+If there is a need to convert an enum to a string representation, such as for
+enums exposed at the JavaScript API interface, the recommended way is to write
+a function named AsString, declared "static constexpr" and returning an
+absl::string_view. The declaration should be right after the enum declaration,
+in the same scope; the implementation (which must be marked "inline") should
+be at the end of the same header file, as in the sketch below.
+
+If the enum is not defined within a class, the "static" keyword is not needed.
+
 [1]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/units/timestamp.h;drc=b95d90b78a3491ef8e8aa0640dd521515ec881ca;l=29
 [2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/rtc_base/thread.h;drc=1107751b6f11c35259a1c5c8a0f716e227b7e3b4;l=194
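A minimal sketch of that AsString pattern (the enum and its values are hypothetical, not taken from the patch):

#include "absl/strings/string_view.h"

enum class PacketFate {
  kDelivered,
  kDropped,
};

// Declared right after the enum, in the same scope. No "static" is needed
// here because the enum is not nested inside a class.
constexpr absl::string_view AsString(PacketFate fate);

// At the end of the same header file:
inline constexpr absl::string_view AsString(PacketFate fate) {
  switch (fate) {
    case PacketFate::kDelivered:
      return "kDelivered";
    case PacketFate::kDropped:
      return "kDropped";
  }
  return "";  // Unreached when all enumerators are handled above.
}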
diff --git a/g3doc/sitemap.md b/g3doc/sitemap.md
index c893d6ce3a..4a705c4da0 100644
--- a/g3doc/sitemap.md
+++ b/g3doc/sitemap.md
@@ -3,6 +3,7 @@
 * Code
   * [Style guide](/g3doc/style-guide.md)
   * [Documentation](/g3doc/how_to_write_documentation.md)
+  * [Become a committer](/g3doc/become_a_committer.md)
   * [Public C++ API](/api/g3doc/index.md)
   * [Threading](/api/g3doc/threading_design.md)
 * Implementation
diff --git a/g3doc/style-guide.md b/g3doc/style-guide.md
index f3b0e8869d..62c99fc73b 100644
--- a/g3doc/style-guide.md
+++ b/g3doc/style-guide.md
@@ -1,16 +1,12 @@
 # WebRTC coding style guide

-
+

 ## General advice

 Some older parts of the code violate the style guide in various ways.
-
-* If making small changes to such code, follow the style guide when it's
-  reasonable to do so, but in matters of formatting etc., it is often better to
-  be consistent with the surrounding code.
-* If making large changes to such code, consider first cleaning it up in a
+If making large changes to such code, consider first cleaning it up in a
 separate CL.

 ## C++

@@ -20,23 +16,19 @@ WebRTC follows the [Chromium C++ style guide][chr-style] and the
 style guide trumps the Google style guide, and the rules in this file trump
 them both.

-[chr-style]: https://chromium.googlesource.com/chromium/src/+/HEAD/styleguide/c++/c++.md
+[chr-style]: https://chromium.googlesource.com/chromium/src/+/main/styleguide/c++/c++.md
 [goog-style]: https://google.github.io/styleguide/cppguide.html

 ### C++ version

-WebRTC is written in C++14, but with some restrictions:
-
-* We only allow the subset of C++14 (language and library) that is not banned by
-  Chromium; see the [list of banned C++ features in Chromium][chromium-cpp].
-* We only allow the subset of C++14 that is also valid C++17; otherwise, users
-  would not be able to compile WebRTC in C++17 mode.
+WebRTC is written in C++17, but with some restrictions:

-[chromium-cpp]: https://chromium-cpp.appspot.com/
+* We only allow the subset of C++17 (language and library) that is not banned by
+  Chromium; see the [list of banned C++ features in Chromium][chr-style-cpp].
+* We only allow the subset of C++17 that is also valid C++20; otherwise, users
+  would not be able to compile WebRTC in C++20 mode.

-Unlike the Chromium and Google C++ style guides, we do not allow C++20-style
-designated initializers, because we want to stay compatible with compilers that
-do not yet support them.
+[chr-style-cpp]: https://chromium.googlesource.com/chromium/src/+/main/styleguide/c++/c++-features.md

 ### Abseil

@@ -108,7 +100,7 @@ In other words, rename the existing function, and provide an inline wrapper
 using the original name that calls it. That way, callers who are willing to
 call it using the `DEPRECATED_`-prefixed name don't get the warning.

-[ABSL_DEPRECATED]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/abseil-cpp/absl/base/attributes.h?q=ABSL_DEPRECATED
+[ABSL_DEPRECATED]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/abseil-cpp/absl/base/attributes.h?q=ABSL_DEPRECATED

 ### ArrayView

@@ -141,9 +133,7 @@ The following smart pointer types are recommended:

 * `std::unique_ptr` for all pointers with exclusive ownership
 * `rtc::scoped_refptr` for all objects with shared ownership

 Use of `std::shared_ptr` is *not permitted*. It is banned in the Chromium style
-guide (overriding the Google style guide), and offers no compelling advantage
-over `rtc::scoped_refptr` (which is cloned from the corresponding Chromium
-type). See the
+guide (overriding the Google style guide). See the
 [list of banned C++ library features in Chromium][chr-std-shared-ptr] for more
 information.

@@ -152,7 +142,7 @@ In most cases, one will want to explicitly control lifetimes, and therefore use
 exist both from the API users and internally, with no way to invalidate
 pointers held by the API user, `rtc::scoped_refptr` can be appropriate.

-[chr-std-shared-ptr]: https://chromium-cpp.appspot.com/#library-blocklist
+[chr-std-shared-ptr]: https://chromium.googlesource.com/chromium/src/+/main/styleguide/c++/c++-features.md#shared-pointers-banned

 ### `std::bind`

@@ -180,11 +170,8 @@ headers you need.

 There's a substantial chunk of legacy C code in WebRTC, and a lot of it is old
 enough that it violates the parts of the C++ style guide that also apply to C
 (naming etc.) for the simple reason that it pre-dates the use of the current C++
- -* If making small changes to C code, mimic the style of the surrounding code. -* If making large changes to C code, consider converting the whole thing to C++ - first. +style guide for this code base. If making large changes to C code, consider +converting the whole thing to C++ first. ## Java @@ -197,13 +184,13 @@ WebRTC follows the [Google Java style guide][goog-java-style]. WebRTC follows the [Chromium Objective-C and Objective-C++ style guide][chr-objc-style]. -[chr-objc-style]: https://chromium.googlesource.com/chromium/src/+/HEAD/styleguide/objective-c/objective-c.md +[chr-objc-style]: https://chromium.googlesource.com/chromium/src/+/main/styleguide/objective-c/objective-c.md ## Python WebRTC follows [Chromium's Python style][chr-py-style]. -[chr-py-style]: https://chromium.googlesource.com/chromium/src/+/HEAD/styleguide/python/python.md +[chr-py-style]: https://chromium.googlesource.com/chromium/src/+/main/styleguide/python/python.md ## Build files diff --git a/logging/BUILD.gn b/logging/BUILD.gn index 90a05f7c49..1b7993104e 100644 --- a/logging/BUILD.gn +++ b/logging/BUILD.gn @@ -18,6 +18,7 @@ if (is_android) { group("logging") { deps = [ ":rtc_event_audio", + ":rtc_event_begin_end", ":rtc_event_bwe", ":rtc_event_log_impl_encoder", ":rtc_event_pacing", @@ -32,6 +33,35 @@ rtc_source_set("rtc_event_log_api") { deps = [ "../api/rtc_event_log" ] } +rtc_library("rtc_event_field") { + sources = [ + "rtc_event_log/events/fixed_length_encoding_parameters_v3.cc", + "rtc_event_log/events/fixed_length_encoding_parameters_v3.h", + "rtc_event_log/events/rtc_event_definition.h", + "rtc_event_log/events/rtc_event_field_encoding.cc", + "rtc_event_log/events/rtc_event_field_encoding.h", + "rtc_event_log/events/rtc_event_field_encoding_parser.cc", + "rtc_event_log/events/rtc_event_field_encoding_parser.h", + "rtc_event_log/events/rtc_event_field_extraction.cc", + "rtc_event_log/events/rtc_event_field_extraction.h", + ] + + deps = [ + ":rtc_event_number_encodings", + "../api:array_view", + "../api/rtc_event_log", + "../api/units:timestamp", + "../rtc_base:bitstream_reader", + "../rtc_base:checks", + "../rtc_base:logging", + "../rtc_base:rtc_base_approved", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + rtc_library("rtc_stream_config") { sources = [ "rtc_event_log/rtc_stream_config.cc", @@ -51,11 +81,14 @@ rtc_library("rtc_event_pacing") { ] deps = [ - "../api:scoped_refptr", + ":rtc_event_field", "../api/rtc_event_log", "../api/units:timestamp", ] - absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + ] } rtc_library("rtc_event_audio") { @@ -71,14 +104,33 @@ rtc_library("rtc_event_audio") { ] deps = [ + ":rtc_event_field", ":rtc_stream_config", - "../api:scoped_refptr", "../api/rtc_event_log", "../api/units:timestamp", "../modules/audio_coding:audio_network_adaptor_config", "../rtc_base:checks", ] - absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + ] +} + +rtc_library("rtc_event_begin_end") { + sources = [ + "rtc_event_log/events/rtc_event_begin_log.cc", + "rtc_event_log/events/rtc_event_begin_log.h", + "rtc_event_log/events/rtc_event_end_log.cc", + "rtc_event_log/events/rtc_event_end_log.h", + ] + deps = [ + ":rtc_event_field", + "../api:array_view", + "../api/rtc_event_log", + "../api/units:timestamp", + ] + 
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } rtc_library("rtc_event_bwe") { @@ -99,14 +151,15 @@ rtc_library("rtc_event_bwe") { ] deps = [ + ":rtc_event_field", "../api:network_state_predictor_api", - "../api:scoped_refptr", "../api/rtc_event_log", "../api/units:data_rate", "../api/units:timestamp", ] absl_deps = [ "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] } @@ -117,6 +170,7 @@ rtc_library("rtc_event_frame_events") { "rtc_event_log/events/rtc_event_frame_decoded.h", ] deps = [ + ":rtc_event_field", "../api/rtc_event_log", "../api/units:timestamp", "../api/video:video_frame", @@ -124,6 +178,7 @@ rtc_library("rtc_event_frame_events") { ] absl_deps = [ "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] } @@ -139,18 +194,21 @@ rtc_library("rtc_event_generic_packet_events") { "rtc_event_log/events/rtc_event_generic_packet_sent.h", ] deps = [ + ":rtc_event_field", "../api/rtc_event_log", "../api/units:timestamp", "../rtc_base:timeutils", ] absl_deps = [ "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] } rtc_library("rtc_event_rtp_rtcp") { sources = [ + "rtc_event_log/events/logged_rtp_rtcp.h", "rtc_event_log/events/rtc_event_rtcp_packet_incoming.cc", "rtc_event_log/events/rtc_event_rtcp_packet_incoming.h", "rtc_event_log/events/rtc_event_rtcp_packet_outgoing.cc", @@ -162,14 +220,19 @@ rtc_library("rtc_event_rtp_rtcp") { ] deps = [ + ":rtc_event_field", "../api:array_view", - "../api:scoped_refptr", + "../api:rtp_headers", "../api/rtc_event_log", + "../api/units:timestamp", "../modules/rtp_rtcp:rtp_rtcp_format", "../rtc_base:checks", "../rtc_base:rtc_base_approved", ] - absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + ] } rtc_library("rtc_event_video") { @@ -181,13 +244,42 @@ rtc_library("rtc_event_video") { ] deps = [ + ":rtc_event_field", ":rtc_stream_config", - "../api:scoped_refptr", "../api/rtc_event_log", "../api/units:timestamp", "../rtc_base:checks", ] - absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + ] +} + +rtc_library("rtc_event_number_encodings") { + sources = [ + "rtc_event_log/encoder/bit_writer.cc", + "rtc_event_log/encoder/bit_writer.h", + "rtc_event_log/encoder/rtc_event_log_encoder_common.cc", + "rtc_event_log/encoder/rtc_event_log_encoder_common.h", + "rtc_event_log/encoder/var_int.cc", + "rtc_event_log/encoder/var_int.h", + ] + + defines = [] + + deps = [ + "../rtc_base:bitstream_reader", + "../rtc_base:checks", + "../rtc_base:ignore_wundef", + "../rtc_base:macromagic", + "../rtc_base:rtc_base_approved", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] } # TODO(eladalon): Break down into (1) encoder and (2) decoder; we don't need @@ -198,18 +290,16 @@ rtc_library("rtc_event_log_impl_encoder") { "rtc_event_log/encoder/blob_encoding.h", "rtc_event_log/encoder/delta_encoding.cc", "rtc_event_log/encoder/delta_encoding.h", - "rtc_event_log/encoder/rtc_event_log_encoder_common.cc", - "rtc_event_log/encoder/rtc_event_log_encoder_common.h", - "rtc_event_log/encoder/var_int.cc", - 
"rtc_event_log/encoder/var_int.h", ] defines = [] deps = [ + ":rtc_event_number_encodings", "../api:rtp_headers", "../api:rtp_parameters", "../api/transport:network_control", + "../rtc_base:bitstream_reader", "../rtc_base:checks", "../rtc_base:ignore_wundef", "../rtc_base:rtc_base_approved", @@ -224,7 +314,9 @@ rtc_library("rtc_event_log_impl_encoder") { deps += [ ":ice_log", ":rtc_event_audio", + ":rtc_event_begin_end", ":rtc_event_bwe", + ":rtc_event_field", ":rtc_event_frame_events", ":rtc_event_generic_packet_events", ":rtc_event_log2_proto", @@ -244,6 +336,8 @@ rtc_library("rtc_event_log_impl_encoder") { "rtc_event_log/encoder/rtc_event_log_encoder_legacy.h", "rtc_event_log/encoder/rtc_event_log_encoder_new_format.cc", "rtc_event_log/encoder/rtc_event_log_encoder_new_format.h", + "rtc_event_log/encoder/rtc_event_log_encoder_v3.cc", + "rtc_event_log/encoder/rtc_event_log_encoder_v3.h", ] } } @@ -312,7 +406,6 @@ if (rtc_enable_protobuf) { rtc_library("rtc_event_log_parser") { visibility = [ "*" ] sources = [ - "rtc_event_log/logged_events.cc", "rtc_event_log/logged_events.h", "rtc_event_log/rtc_event_log_parser.cc", "rtc_event_log/rtc_event_log_parser.h", @@ -323,12 +416,14 @@ if (rtc_enable_protobuf) { deps = [ ":ice_log", ":rtc_event_audio", + ":rtc_event_begin_end", ":rtc_event_bwe", ":rtc_event_frame_events", ":rtc_event_generic_packet_events", ":rtc_event_log2_proto", ":rtc_event_log_impl_encoder", ":rtc_event_log_proto", + ":rtc_event_number_encodings", ":rtc_event_pacing", ":rtc_event_rtp_rtcp", ":rtc_event_video", @@ -345,7 +440,6 @@ if (rtc_enable_protobuf) { "../call:video_stream_api", "../modules:module_api_public", "../modules/audio_coding:audio_network_adaptor", - "../modules/rtp_rtcp", "../modules/rtp_rtcp:rtp_rtcp_format", "../rtc_base:checks", "../rtc_base:ignore_wundef", @@ -369,6 +463,8 @@ if (rtc_enable_protobuf) { "rtc_event_log/encoder/delta_encoding_unittest.cc", "rtc_event_log/encoder/rtc_event_log_encoder_common_unittest.cc", "rtc_event_log/encoder/rtc_event_log_encoder_unittest.cc", + "rtc_event_log/events/rtc_event_field_encoding_unittest.cc", + "rtc_event_log/events/rtc_event_field_extraction_unittest.cc", "rtc_event_log/rtc_event_log_unittest.cc", "rtc_event_log/rtc_event_log_unittest_helper.cc", "rtc_event_log/rtc_event_log_unittest_helper.h", @@ -378,12 +474,14 @@ if (rtc_enable_protobuf) { ":ice_log", ":rtc_event_audio", ":rtc_event_bwe", + ":rtc_event_field", ":rtc_event_frame_events", ":rtc_event_generic_packet_events", ":rtc_event_log2_proto", ":rtc_event_log_impl_encoder", ":rtc_event_log_parser", ":rtc_event_log_proto", + ":rtc_event_number_encodings", ":rtc_event_pacing", ":rtc_event_rtp_rtcp", ":rtc_event_video", @@ -406,6 +504,7 @@ if (rtc_enable_protobuf) { "../system_wrappers", "../test:fileutils", "../test:test_support", + "../test/logging:log_writer", "//testing/gtest", ] absl_deps = [ @@ -455,13 +554,17 @@ rtc_library("ice_log") { ] deps = [ + ":rtc_event_field", "../api:libjingle_logging_api", "../api:libjingle_peerconnection_api", # For api/dtls_transport_interface.h "../api/rtc_event_log", "../api/units:timestamp", "../rtc_base:rtc_base_approved", ] - absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + ] } if (rtc_include_tests) { diff --git a/logging/g3doc/rtc_event_log.md b/logging/g3doc/rtc_event_log.md index c7996e0b42..6b81de1066 100644 --- a/logging/g3doc/rtc_event_log.md +++ b/logging/g3doc/rtc_event_log.md @@ -25,7 +25,7 @@ The 
contents include:

 No guarantees are made on the wire format, and the format may change without
 prior notice. To maintain compatibility with past and future formats, analysis
 tools should be built on top of the provided
-[rtc_event_log_parser.h](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/logging/rtc_event_log/rtc_event_log_parser.h)
+[rtc_event_log_parser.h](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/logging/rtc_event_log/rtc_event_log_parser.h)

 In particular, an analysis tool should *not* read the log as a protobuf.
diff --git a/logging/rtc_event_log/encoder/bit_writer.cc b/logging/rtc_event_log/encoder/bit_writer.cc
new file mode 100644
index 0000000000..e8748d3db3
--- /dev/null
+++ b/logging/rtc_event_log/encoder/bit_writer.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "logging/rtc_event_log/encoder/bit_writer.h"
+
+namespace webrtc {
+
+namespace {
+size_t BitsToBytes(size_t bits) {
+  return (bits / 8) + (bits % 8 > 0 ? 1 : 0);
+}
+}  // namespace
+
+void BitWriter::WriteBits(uint64_t val, size_t bit_count) {
+  RTC_DCHECK(valid_);
+  const bool success = bit_writer_.WriteBits(val, bit_count);
+  RTC_DCHECK(success);
+  written_bits_ += bit_count;
+}
+
+void BitWriter::WriteBits(absl::string_view input) {
+  RTC_DCHECK(valid_);
+  for (char c : input) {
+    WriteBits(static_cast<uint8_t>(c), CHAR_BIT);
+  }
+}
+
+// Returns everything that was written so far.
+// Nothing more may be written after this is called.
+std::string BitWriter::GetString() {
+  RTC_DCHECK(valid_);
+  valid_ = false;
+
+  buffer_.resize(BitsToBytes(written_bits_));
+  written_bits_ = 0;
+
+  std::string result;
+  std::swap(buffer_, result);
+  return result;
+}
+
+}  // namespace webrtc
diff --git a/logging/rtc_event_log/encoder/bit_writer.h b/logging/rtc_event_log/encoder/bit_writer.h
new file mode 100644
index 0000000000..421e7c4370
--- /dev/null
+++ b/logging/rtc_event_log/encoder/bit_writer.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LOGGING_RTC_EVENT_LOG_ENCODER_BIT_WRITER_H_
+#define LOGGING_RTC_EVENT_LOG_ENCODER_BIT_WRITER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/bit_buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Wrap BitBufferWriter and extend its functionality by (1) keeping track of
+// the number of bits written and (2) owning its buffer.
+class BitWriter final {
+ public:
+  explicit BitWriter(size_t byte_count)
+      : buffer_(byte_count, '\0'),
+        bit_writer_(reinterpret_cast<uint8_t*>(&buffer_[0]), buffer_.size()),
+        written_bits_(0),
+        valid_(true) {
+    RTC_DCHECK_GT(byte_count, 0);
+  }
+
+  BitWriter(const BitWriter&) = delete;
+  BitWriter& operator=(const BitWriter&) = delete;
+
+  void WriteBits(uint64_t val, size_t bit_count);
+
+  void WriteBits(absl::string_view input);
+
+  // Returns everything that was written so far.
+  // Nothing more may be written after this is called.
+  std::string GetString();
+
+ private:
+  std::string buffer_;
+  rtc::BitBufferWriter bit_writer_;
+  // Note: Counting bits instead of bytes wraps around earlier than it has to,
+  // which means the maximum length is lower than it could be. We don't expect
+  // to go anywhere near the limit, though, so this is good enough.
+  size_t written_bits_;
+  bool valid_;
+};
+
+}  // namespace webrtc
+
+#endif  // LOGGING_RTC_EVENT_LOG_ENCODER_BIT_WRITER_H_
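A short usage sketch of the BitWriter declared above (the values are hypothetical, not part of the patch). The writer owns its buffer, counts the bits actually written, and GetString() trims the buffer to whole bytes and invalidates the writer:

#include "logging/rtc_event_log/encoder/bit_writer.h"

std::string EncodeFlags() {
  webrtc::BitWriter writer(/*byte_count=*/4);    // Upper bound on output size.
  writer.WriteBits(/*val=*/5, /*bit_count=*/3);  // 3 bits written so far.
  writer.WriteBits(/*val=*/1, /*bit_count=*/1);  // 4 bits written in total.
  // 4 bits round up to 1 byte, so the returned string has size() == 1.
  return writer.GetString();  // The writer must not be used after this.
}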
diff --git a/logging/rtc_event_log/encoder/blob_encoding.h b/logging/rtc_event_log/encoder/blob_encoding.h
index b5b589aaf6..123fffe8e8 100644
--- a/logging/rtc_event_log/encoder/blob_encoding.h
+++ b/logging/rtc_event_log/encoder/blob_encoding.h
@@ -32,13 +32,13 @@ namespace webrtc {
 // EncodeBlobs() may not fail.
 // EncodeBlobs() never returns the empty string.
 //
-// Calling DecodeBlobs() on an empty string, or with |num_of_blobs| set to 0,
+// Calling DecodeBlobs() on an empty string, or with `num_of_blobs` set to 0,
 // is an error.
 // DecodeBlobs() returns an empty vector if it fails, e.g. due to a mismatch
-// between |num_of_blobs| and |encoded_blobs|, which can happen if
-// |encoded_blobs| is corrupted.
+// between `num_of_blobs` and `encoded_blobs`, which can happen if
+// `encoded_blobs` is corrupted.
 // When successful, DecodeBlobs() returns a vector of string_view objects,
-// which refer to the original input (|encoded_blobs|), and therefore may
+// which refer to the original input (`encoded_blobs`), and therefore may
 // not outlive it.
 //
 // Note that the returned std::string might have been reserved for significantly
diff --git a/logging/rtc_event_log/encoder/delta_encoding.cc b/logging/rtc_event_log/encoder/delta_encoding.cc
index 7bccdabdc8..3a2bee1d31 100644
--- a/logging/rtc_event_log/encoder/delta_encoding.cc
+++ b/logging/rtc_event_log/encoder/delta_encoding.cc
@@ -16,10 +16,11 @@
 #include <memory>

 #include "absl/memory/memory.h"
+#include "logging/rtc_event_log/encoder/bit_writer.h"
 #include "logging/rtc_event_log/encoder/var_int.h"
 #include "rtc_base/bit_buffer.h"
+#include "rtc_base/bitstream_reader.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/constructor_magic.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/numerics/safe_conversions.h"

@@ -71,8 +72,8 @@ uint64_t MaxUnsignedValueOfBitWidth(uint64_t bit_width) {
              : ((static_cast<uint64_t>(1) << bit_width) - 1);
 }

-// Computes the delta between |previous| and |current|, under the assumption
-// that wrap-around occurs after |width| is exceeded.
+// Computes the delta between `previous` and `current`, under the assumption
+// that wrap-around occurs after `width` is exceeded.
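// Worked example, derived from the function below: with an 8-bit width,
// bit_mask == 0xFF, so previous == 250 and current == 5 give the delta
// (5 - 250) & 0xFF == 11, and a decoder recovers (250 + 11) & 0xFF == 5.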
 uint64_t UnsignedDelta(uint64_t previous, uint64_t current, uint64_t bit_mask) {
   return (current - previous) & bit_mask;
 }
@@ -106,58 +107,6 @@ constexpr bool kDefaultSignedDeltas = false;
 constexpr bool kDefaultValuesOptional = false;
 constexpr uint64_t kDefaultValueWidthBits = 64;
 
-// Wrap BitBufferWriter and extend its functionality by (1) keeping track of
-// the number of bits written and (2) owning its buffer.
-class BitWriter final {
- public:
-  explicit BitWriter(size_t byte_count)
-      : buffer_(byte_count, '\0'),
-        bit_writer_(reinterpret_cast<uint8_t*>(&buffer_[0]), buffer_.size()),
-        written_bits_(0),
-        valid_(true) {
-    RTC_DCHECK_GT(byte_count, 0);
-  }
-
-  void WriteBits(uint64_t val, size_t bit_count) {
-    RTC_DCHECK(valid_);
-    const bool success = bit_writer_.WriteBits(val, bit_count);
-    RTC_DCHECK(success);
-    written_bits_ += bit_count;
-  }
-
-  void WriteBits(const std::string& input) {
-    RTC_DCHECK(valid_);
-    for (std::string::value_type c : input) {
-      WriteBits(c, 8 * sizeof(std::string::value_type));
-    }
-  }
-
-  // Returns everything that was written so far.
-  // Nothing more may be written after this is called.
-  std::string GetString() {
-    RTC_DCHECK(valid_);
-    valid_ = false;
-
-    buffer_.resize(BitsToBytes(written_bits_));
-    written_bits_ = 0;
-
-    std::string result;
-    std::swap(buffer_, result);
-    return result;
-  }
-
- private:
-  std::string buffer_;
-  rtc::BitBufferWriter bit_writer_;
-  // Note: Counting bits instead of bytes wraps around earlier than it has to,
-  // which means the maximum length is lower than it could be. We don't expect
-  // to go anywhere near the limit, though, so this is good enough.
-  size_t written_bits_;
-  bool valid_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(BitWriter);
-};
-
 // Parameters for fixed-size delta-encoding/decoding.
 // These are tailored for the sequence which will be encoded (e.g. widths).
 class FixedLengthEncodingParameters final {
@@ -237,6 +186,9 @@ class FixedLengthDeltaEncoder final {
       absl::optional<uint64_t> base,
       const std::vector<absl::optional<uint64_t>>& values);
 
+  FixedLengthDeltaEncoder(const FixedLengthDeltaEncoder&) = delete;
+  FixedLengthDeltaEncoder& operator=(const FixedLengthDeltaEncoder&) = delete;
+
  private:
   // Calculate min/max values of unsigned/signed deltas, given the bit width
   // of all the values in the series.
@@ -258,7 +210,7 @@ class FixedLengthDeltaEncoder final {
   // released by it before it returns. They're mostly a convenient way to
   // avoid having to pass a lot of state between different functions.
   // Therefore, it was deemed acceptable to let them have a reference to
-  // |values|, whose lifetime must exceed the lifetime of |this|.
+  // `values`, whose lifetime must exceed the lifetime of `this`.
   FixedLengthDeltaEncoder(const FixedLengthEncodingParameters& params,
                           absl::optional<uint64_t> base,
                           const std::vector<absl::optional<uint64_t>>& values,
@@ -287,7 +239,7 @@ class FixedLengthDeltaEncoder final {
 
   // The encoding scheme assumes that at least one value is transmitted OOB,
   // so that the first value can be encoded as a delta from that OOB value,
-  // which is |base_|.
+  // which is `base_`.
   const absl::optional<uint64_t> base_;
 
   // The values to be encoded.
@@ -299,8 +251,6 @@ class FixedLengthDeltaEncoder final {
   // ctor has finished running when this is constructed, so that the lower
   // bound on the buffer size would be guaranteed correct.
   std::unique_ptr<BitWriter> writer_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(FixedLengthDeltaEncoder);
 };
 
 // TODO(eladalon): Reduce the number of passes.
@@ -606,20 +556,23 @@ class FixedLengthDeltaDecoder final {
   // will fail to decode this input, though.
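+  // Dispatch sketch (illustrative, assuming the free DecodeDeltas() helper
+  // defined later in this file): callers are expected to probe first:
+  //   if (FixedLengthDeltaDecoder::IsSuitableDecoderFor(input)) {
+  //     return FixedLengthDeltaDecoder::DecodeDeltas(input, base,
+  //                                                  num_of_deltas);
+  //   }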
   static bool IsSuitableDecoderFor(const std::string& input);
 
-  // Assuming that |input| is the result of fixed-size delta-encoding
-  // that took place with the same value to |base| and over |num_of_deltas|
+  // Assuming that `input` is the result of fixed-size delta-encoding
+  // that took place with the same value of `base` and over `num_of_deltas`
   // original values, this will return the sequence of original values.
-  // If an error occurs (can happen if |input| is corrupt), an empty
+  // If an error occurs (can happen if `input` is corrupt), an empty
   // vector will be returned.
   static std::vector<absl::optional<uint64_t>> DecodeDeltas(
       const std::string& input,
       absl::optional<uint64_t> base,
       size_t num_of_deltas);
 
+  FixedLengthDeltaDecoder(const FixedLengthDeltaDecoder&) = delete;
+  FixedLengthDeltaDecoder& operator=(const FixedLengthDeltaDecoder&) = delete;
+
  private:
-  // Reads the encoding header in |input| and returns a FixedLengthDeltaDecoder
+  // Reads the encoding header in `input` and returns a FixedLengthDeltaDecoder
   // with the corresponding configuration, that can be used to decode the
-  // values in |input|.
+  // values in `input`.
   // If the encoding header is corrupt (contains an illegal configuration),
   // nullptr will be returned.
   // When a valid FixedLengthDeltaDecoder is returned, this does not mean that
@@ -633,10 +586,10 @@ class FixedLengthDeltaDecoder final {
   // FixedLengthDeltaDecoder objects are to be created by DecodeDeltas() and
   // released by it before it returns. They're mostly a convenient way to
   // avoid having to pass a lot of state between different functions.
-  // Therefore, it was deemed acceptable that |reader| does not own the buffer
-  // it reads, meaning the lifetime of |this| must not exceed the lifetime
-  // of |reader|'s underlying buffer.
-  FixedLengthDeltaDecoder(std::unique_ptr<rtc::BitBuffer> reader,
+  // Therefore, it was deemed acceptable that `reader` does not own the buffer
+  // it reads, meaning the lifetime of `this` must not exceed the lifetime
+  // of `reader`'s underlying buffer.
+  FixedLengthDeltaDecoder(BitstreamReader reader,
                           const FixedLengthEncodingParameters& params,
                           absl::optional<uint64_t> base,
                           size_t num_of_deltas);
@@ -644,17 +597,7 @@ class FixedLengthDeltaDecoder final {
   // Perform the decoding using the parameters given to the ctor.
   std::vector<absl::optional<uint64_t>> Decode();
 
-  // Decode a varint and write it to |output|. Return value indicates success
-  // or failure. In case of failure, no guarantees are made about the contents
-  // of |output| or the results of additional reads.
-  bool ParseVarInt(uint64_t* output);
-
-  // Attempt to parse a delta from the input reader.
-  // Returns true/false for success/failure.
-  // Writes the delta into |delta| if successful.
-  bool ParseDelta(uint64_t* delta);
-
-  // Add |delta| to |base| to produce the next value in a sequence.
+  // Add `delta` to `base` to produce the next value in a sequence.
   // The delta is applied as signed/unsigned depending on the parameters
   // given to the ctor. Wrap-around is taken into account according to the
   // values' width, as specified by the aforementioned encoding parameters.
@@ -666,7 +609,7 @@ class FixedLengthDeltaDecoder final {
 
   // Reader of the input stream to be decoded. Does not own that buffer.
   // See comment above ctor for details.
-  const std::unique_ptr<rtc::BitBuffer> reader_;
+  BitstreamReader reader_;
 
   // The parameters according to which encoding will be done (width of
   // fields, whether signed deltas should be used, etc.)
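For orientation, the migration in this file replaces per-read success checks on
rtc::BitBuffer with BitstreamReader's deferred Ok() check. The sketch below is
illustrative only; the 2-bit and 6-bit field widths mirror the
kBitsInHeaderForEncodingType / kBitsInHeaderForDeltaWidthBits constants used in
this file and are an assumption about their values, not part of the patch:

    // Hypothetical, self-contained sketch of the header read done in Create().
    #include <stdint.h>

    #include "absl/strings/string_view.h"
    #include "absl/types/optional.h"
    #include "rtc_base/bitstream_reader.h"

    struct HeaderSketch {
      uint64_t encoding_type;     // Encoding-type tag (assumed 2 bits).
      uint64_t delta_width_bits;  // Stored as N-1, so 6 bits cover 1..64.
    };

    absl::optional<HeaderSketch> ReadHeaderSketch(absl::string_view input) {
      webrtc::BitstreamReader reader(input);
      HeaderSketch h;
      h.encoding_type = reader.ReadBits(2);
      h.delta_width_bits = reader.ReadBits(6) + 1;
      if (!reader.Ok()) {  // One validity check replaces per-read bool checks.
        return absl::nullopt;
      }
      return h;
    }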
@@ -674,28 +617,20 @@ class FixedLengthDeltaDecoder final {
 
   // The encoding scheme assumes that at least one value is transmitted OOB,
   // so that the first value can be encoded as a delta from that OOB value,
-  // which is |base_|.
+  // which is `base_`.
   const absl::optional<uint64_t> base_;
 
   // The number of values to be known to be decoded.
   const size_t num_of_deltas_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(FixedLengthDeltaDecoder);
 };
 
 bool FixedLengthDeltaDecoder::IsSuitableDecoderFor(const std::string& input) {
-  if (input.length() < kBitsInHeaderForEncodingType) {
+  BitstreamReader reader(input);
+  uint64_t encoding_type_bits = reader.ReadBits(kBitsInHeaderForEncodingType);
+  if (!reader.Ok()) {
     return false;
   }
 
-  rtc::BitBuffer reader(reinterpret_cast<const uint8_t*>(&input[0]),
-                        kBitsInHeaderForEncodingType);
-
-  uint32_t encoding_type_bits;
-  const bool result =
-      reader.ReadBits(kBitsInHeaderForEncodingType, encoding_type_bits);
-  RTC_DCHECK(result);
-
   const auto encoding_type = static_cast<EncodingType>(encoding_type_bits);
   return encoding_type ==
              EncodingType::kFixedSizeUnsignedDeltasNoEarlyWrapNoOpt ||
@@ -719,18 +654,13 @@ std::unique_ptr<FixedLengthDeltaDecoder> FixedLengthDeltaDecoder::Create(
     const std::string& input,
     absl::optional<uint64_t> base,
     size_t num_of_deltas) {
-  if (input.length() < kBitsInHeaderForEncodingType) {
+  BitstreamReader reader(input);
+  // Encoding type
+  uint32_t encoding_type_bits = reader.ReadBits(kBitsInHeaderForEncodingType);
+  if (!reader.Ok()) {
     return nullptr;
   }
 
-  auto reader = std::make_unique<rtc::BitBuffer>(
-      reinterpret_cast<const uint8_t*>(&input[0]), input.length());
-
-  // Encoding type
-  uint32_t encoding_type_bits;
-  const bool result =
-      reader->ReadBits(kBitsInHeaderForEncodingType, encoding_type_bits);
-  RTC_DCHECK(result);
   const EncodingType encoding = static_cast<EncodingType>(encoding_type_bits);
   if (encoding != EncodingType::kFixedSizeUnsignedDeltasNoEarlyWrapNoOpt &&
       encoding !=
@@ -739,15 +669,10 @@ std::unique_ptr<FixedLengthDeltaDecoder> FixedLengthDeltaDecoder::Create(
     return nullptr;
   }
 
-  uint32_t read_buffer;
-
-  // delta_width_bits
-  if (!reader->ReadBits(kBitsInHeaderForDeltaWidthBits, read_buffer)) {
-    return nullptr;
-  }
-  RTC_DCHECK_LE(read_buffer, 64 - 1);  // See encoding for -1's rationale.
+  // See encoding for +1's rationale.
   const uint64_t delta_width_bits =
-      read_buffer + 1;  // See encoding for +1's rationale.
+      reader.ReadBits(kBitsInHeaderForDeltaWidthBits) + 1;
+  RTC_DCHECK_LE(delta_width_bits, 64);
 
   // signed_deltas, values_optional, value_width_bits
   bool signed_deltas;
@@ -758,25 +683,15 @@ std::unique_ptr<FixedLengthDeltaDecoder> FixedLengthDeltaDecoder::Create(
     values_optional = kDefaultValuesOptional;
     value_width_bits = kDefaultValueWidthBits;
   } else {
-    // signed_deltas
-    if (!reader->ReadBits(kBitsInHeaderForSignedDeltas, read_buffer)) {
-      return nullptr;
-    }
-    signed_deltas = rtc::dchecked_cast<bool>(read_buffer);
-
-    // values_optional
-    if (!reader->ReadBits(kBitsInHeaderForValuesOptional, read_buffer)) {
-      return nullptr;
-    }
-    RTC_DCHECK_LE(read_buffer, 1);
-    values_optional = rtc::dchecked_cast<bool>(read_buffer);
+    signed_deltas = reader.Read<bool>();
+    values_optional = reader.Read<bool>();
+    // See encoding for +1's rationale.
+    value_width_bits = reader.ReadBits(kBitsInHeaderForValueWidthBits) + 1;
+    RTC_DCHECK_LE(value_width_bits, 64);
+  }
 
-    // value_width_bits
-    if (!reader->ReadBits(kBitsInHeaderForValueWidthBits, read_buffer)) {
-      return nullptr;
-    }
-    RTC_DCHECK_LE(read_buffer, 64 - 1);  // See encoding for -1's rationale.
-    value_width_bits = read_buffer + 1;  // See encoding for +1's rationale.
+  if (!reader.Ok()) {
+    return nullptr;
   }
 
   // Note: Because of the way the parameters are read, it is not possible
@@ -790,35 +705,28 @@ std::unique_ptr<FixedLengthDeltaDecoder> FixedLengthDeltaDecoder::Create(
 
   FixedLengthEncodingParameters params(delta_width_bits, signed_deltas,
                                        values_optional, value_width_bits);
-  return absl::WrapUnique(new FixedLengthDeltaDecoder(std::move(reader), params,
-                                                      base, num_of_deltas));
+  return absl::WrapUnique(
+      new FixedLengthDeltaDecoder(reader, params, base, num_of_deltas));
 }
 
 FixedLengthDeltaDecoder::FixedLengthDeltaDecoder(
-    std::unique_ptr<rtc::BitBuffer> reader,
+    BitstreamReader reader,
     const FixedLengthEncodingParameters& params,
     absl::optional<uint64_t> base,
    size_t num_of_deltas)
-    : reader_(std::move(reader)),
+    : reader_(reader),
      params_(params),
      base_(base),
      num_of_deltas_(num_of_deltas) {
-  RTC_DCHECK(reader_);
+  RTC_DCHECK(reader_.Ok());
 }
 
 std::vector<absl::optional<uint64_t>> FixedLengthDeltaDecoder::Decode() {
-  RTC_DCHECK(reader_);
-
+  RTC_DCHECK(reader_.Ok());
   std::vector<bool> existing_values(num_of_deltas_);
   if (params_.values_optional()) {
     for (size_t i = 0; i < num_of_deltas_; ++i) {
-      uint32_t exists;
-      if (!reader_->ReadBits(1u, exists)) {
-        RTC_LOG(LS_WARNING) << "Failed to read existence-indicating bit.";
-        return std::vector<absl::optional<uint64_t>>();
-      }
-      RTC_DCHECK_LE(exists, 1u);
-      existing_values[i] = (exists == 1);
+      existing_values[i] = reader_.Read<bool>();
     }
   } else {
     std::fill(existing_values.begin(), existing_values.end(), true);
@@ -837,64 +745,20 @@ std::vector<absl::optional<uint64_t>> FixedLengthDeltaDecoder::Decode() {
       // If the base is non-existent, the first existent value is encoded as
       // a varint, rather than as a delta.
       RTC_DCHECK(!base_.has_value());
-      uint64_t first_value;
-      if (!ParseVarInt(&first_value)) {
-        RTC_LOG(LS_WARNING) << "Failed to read first value.";
-        return std::vector<absl::optional<uint64_t>>();
-      }
-      values[i] = first_value;
+      values[i] = DecodeVarInt(reader_);
     } else {
-      uint64_t delta;
-      if (!ParseDelta(&delta)) {
-        return std::vector<absl::optional<uint64_t>>();
-      }
-      values[i] = ApplyDelta(previous.value(), delta);
+      uint64_t delta = reader_.ReadBits(params_.delta_width_bits());
+      values[i] = ApplyDelta(*previous, delta);
     }
     previous = values[i];
   }
 
-  return values;
-}
-
-bool FixedLengthDeltaDecoder::ParseVarInt(uint64_t* output) {
-  RTC_DCHECK(reader_);
-  return DecodeVarInt(reader_.get(), output) != 0;
-}
-
-bool FixedLengthDeltaDecoder::ParseDelta(uint64_t* delta) {
-  RTC_DCHECK(reader_);
-
-  // BitBuffer and BitBufferWriter read/write higher bits before lower bits.
-
-  const size_t lower_bit_count =
-      std::min<uint64_t>(params_.delta_width_bits(), 32u);
-  const size_t higher_bit_count = (params_.delta_width_bits() <= 32u)
-                                      ? 0
-                                      : params_.delta_width_bits() - 32u;
-
-  uint32_t lower_bits;
-  uint32_t higher_bits;
-
-  if (higher_bit_count > 0) {
-    if (!reader_->ReadBits(higher_bit_count, higher_bits)) {
-      RTC_LOG(LS_WARNING) << "Failed to read higher half of delta.";
-      return false;
-    }
-  } else {
-    higher_bits = 0;
-  }
-
-  if (!reader_->ReadBits(lower_bit_count, lower_bits)) {
-    RTC_LOG(LS_WARNING) << "Failed to read lower half of delta.";
-    return false;
+  if (!reader_.Ok()) {
+    values = {};
   }
 
-  const uint64_t lower_bits_64 = static_cast<uint64_t>(lower_bits);
-  const uint64_t higher_bits_64 = static_cast<uint64_t>(higher_bits);
-
-  *delta = (higher_bits_64 << 32) | lower_bits_64;
-  return true;
+  return values;
 }
 
 uint64_t FixedLengthDeltaDecoder::ApplyDelta(uint64_t base,
diff --git a/logging/rtc_event_log/encoder/delta_encoding.h b/logging/rtc_event_log/encoder/delta_encoding.h
index 5e86a986f6..614012acd3 100644
--- a/logging/rtc_event_log/encoder/delta_encoding.h
+++ b/logging/rtc_event_log/encoder/delta_encoding.h
@@ -21,10 +21,10 @@
 
 namespace webrtc {
 
-// Encode |values| as a sequence of deltas following on |base| and return it.
+// Encode `values` as a sequence of deltas following on `base` and return it.
 // If all of the values were equal to the base, an empty string will be
 // returned; this is a valid encoding of that edge case.
-// |base| is not guaranteed to be written into |output|, and must therefore
+// `base` is not guaranteed to be written into `output`, and must therefore
 // be provided separately to the decoder.
 // This function never fails.
 // TODO(eladalon): Split into optional and non-optional variants (efficiency).
@@ -34,8 +34,8 @@ std::string EncodeDeltas(absl::optional<uint64_t> base,
 // EncodeDeltas() and DecodeDeltas() are inverse operations;
 // invoking DecodeDeltas() over the output of EncodeDeltas(), will return
 // the input originally given to EncodeDeltas().
-// |num_of_deltas| must be greater than zero. If input is not a valid encoding
-// of |num_of_deltas| elements based on |base|, the function returns an empty
+// `num_of_deltas` must be greater than zero. If input is not a valid encoding
+// of `num_of_deltas` elements based on `base`, the function returns an empty
 // vector, which signals an error.
 // TODO(eladalon): Split into optional and non-optional variants (efficiency).
 std::vector<absl::optional<uint64_t>> DecodeDeltas(
diff --git a/logging/rtc_event_log/encoder/delta_encoding_unittest.cc b/logging/rtc_event_log/encoder/delta_encoding_unittest.cc
index b385b8998a..d0f7fb93db 100644
--- a/logging/rtc_event_log/encoder/delta_encoding_unittest.cc
+++ b/logging/rtc_event_log/encoder/delta_encoding_unittest.cc
@@ -44,7 +44,7 @@ void MaybeSetSignedness(DeltaSignedness signedness) {
       SetFixedLengthEncoderDeltaSignednessForTesting(true);
       return;
   }
-  RTC_NOTREACHED();
+  RTC_DCHECK_NOTREACHED();
 }
 
 uint64_t RandomWithMaxBitWidth(Random* prng, uint64_t max_width) {
@@ -64,9 +64,9 @@ uint64_t RandomWithMaxBitWidth(Random* prng, uint64_t max_width) {
   }
 }
 
-// Encodes |values| based on |base|, then decodes the result and makes sure
+// Encodes `values` based on `base`, then decodes the result and makes sure
 // that it is equal to the original input.
-// If |encoded_string| is non-null, the encoded result will also be written
+// If `encoded_string` is non-null, the encoded result will also be written
 // into it.
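+// Round-trip sketch (illustrative only):
+//   absl::optional<uint64_t> base = 100;
+//   std::vector<absl::optional<uint64_t>> values = {101, 103, 106};
+//   std::string encoded = EncodeDeltas(base, values);
+//   EXPECT_EQ(DecodeDeltas(encoded, base, values.size()), values);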
 void TestEncodingAndDecoding(
     absl::optional<uint64_t> base,
@@ -100,7 +100,7 @@ std::vector<absl::optional<uint64_t>> CreateSequenceByLastValue(
   return result;
 }
 
-// If |sequence_length| is greater than the number of deltas, the sequence of
+// If `sequence_length` is greater than the number of deltas, the sequence of
 // deltas will wrap around.
 std::vector<absl::optional<uint64_t>> CreateSequenceByOptionalDeltas(
     uint64_t first,
@@ -141,7 +141,7 @@ size_t EncodingLengthUpperBound(size_t delta_max_bit_width,
   return delta_max_bit_width * num_of_deltas + *smallest_header_size_bytes;
 }
 
-// If |sequence_length| is greater than the number of deltas, the sequence of
+// If `sequence_length` is greater than the number of deltas, the sequence of
 // deltas will wrap around.
 std::vector<absl::optional<uint64_t>> CreateSequenceByDeltas(
     uint64_t first,
@@ -502,7 +502,7 @@ TEST_P(DeltaEncodingCompressionQualityTest,
   uint64_t last_element[arraysize(bases)];
   memcpy(last_element, bases, sizeof(bases));
 
-  // Avoid empty |deltas| due to first element causing wrap-around.
+  // Avoid empty `deltas` due to first element causing wrap-around.
   deltas[0] = 1;
   for (size_t i = 0; i < arraysize(last_element); ++i) {
     last_element[i] += 1;
diff --git a/logging/rtc_event_log/encoder/rtc_event_log_encoder_common.h b/logging/rtc_event_log/encoder/rtc_event_log_encoder_common.h
index 429f8ed2ad..c167a8eb8f 100644
--- a/logging/rtc_event_log/encoder/rtc_event_log_encoder_common.h
+++ b/logging/rtc_event_log/encoder/rtc_event_log_encoder_common.h
@@ -55,7 +55,7 @@ uint64_t ToUnsigned(T y) {
   }
 }
 
-// Assuming x = ToUnsigned(y), return |y|.
+// Assuming x = ToUnsigned(y), return `y`.
 // Note: static_cast<T>(x) would work on most platforms and compilers, but
 // involves undefined behavior. This function is well-defined, and can be
 // optimized to a noop for 64 bit types, or a few arithmetic
@@ -74,7 +74,7 @@ bool ToSigned(uint64_t x, T* y) {
   using UNSIGNED_T = typename std::make_unsigned<T>::type;
   constexpr auto MAX_UNSIGNED_T = std::numeric_limits<UNSIGNED_T>::max();
   if (x > static_cast<uint64_t>(MAX_UNSIGNED_T)) {
-    return false;  // |x| cannot be represented using a T.
+    return false;  // `x` cannot be represented using a T.
} if (x <= static_cast(MAX_T)) { diff --git a/logging/rtc_event_log/encoder/rtc_event_log_encoder_legacy.cc b/logging/rtc_event_log/encoder/rtc_event_log_encoder_legacy.cc index 2bd7507853..add42ad15b 100644 --- a/logging/rtc_event_log/encoder/rtc_event_log_encoder_legacy.cc +++ b/logging/rtc_event_log/encoder/rtc_event_log_encoder_legacy.cc @@ -78,9 +78,9 @@ rtclog::DelayBasedBweUpdate::DetectorState ConvertDetectorState( case BandwidthUsage::kBwOverusing: return rtclog::DelayBasedBweUpdate::BWE_OVERUSING; case BandwidthUsage::kLast: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog::DelayBasedBweUpdate::BWE_NORMAL; } @@ -94,9 +94,9 @@ rtclog::BweProbeResult::ResultType ConvertProbeResultType( case ProbeFailureReason::kTimeout: return rtclog::BweProbeResult::TIMEOUT; case ProbeFailureReason::kLast: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog::BweProbeResult::SUCCESS; } @@ -107,9 +107,9 @@ rtclog::VideoReceiveConfig_RtcpMode ConvertRtcpMode(RtcpMode rtcp_mode) { case RtcpMode::kReducedSize: return rtclog::VideoReceiveConfig::RTCP_REDUCEDSIZE; case RtcpMode::kOff: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog::VideoReceiveConfig::RTCP_COMPOUND; } @@ -125,9 +125,9 @@ ConvertIceCandidatePairConfigType(IceCandidatePairConfigType type) { case IceCandidatePairConfigType::kSelected: return rtclog::IceCandidatePairConfig::SELECTED; case IceCandidatePairConfigType::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog::IceCandidatePairConfig::ADDED; } @@ -145,9 +145,9 @@ rtclog::IceCandidatePairConfig::IceCandidateType ConvertIceCandidateType( case IceCandidateType::kRelay: return rtclog::IceCandidatePairConfig::RELAY; case IceCandidateType::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog::IceCandidatePairConfig::UNKNOWN_CANDIDATE_TYPE; } @@ -165,9 +165,9 @@ rtclog::IceCandidatePairConfig::Protocol ConvertIceCandidatePairProtocol( case IceCandidatePairProtocol::kTls: return rtclog::IceCandidatePairConfig::TLS; case IceCandidatePairProtocol::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog::IceCandidatePairConfig::UNKNOWN_PROTOCOL; } @@ -182,9 +182,9 @@ ConvertIceCandidatePairAddressFamily( case IceCandidatePairAddressFamily::kIpv6: return rtclog::IceCandidatePairConfig::IPV6; case IceCandidatePairAddressFamily::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog::IceCandidatePairConfig::UNKNOWN_ADDRESS_FAMILY; } @@ -204,9 +204,9 @@ rtclog::IceCandidatePairConfig::NetworkType ConvertIceCandidateNetworkType( case IceCandidateNetworkType::kCellular: return rtclog::IceCandidatePairConfig::CELLULAR; case IceCandidateNetworkType::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog::IceCandidatePairConfig::UNKNOWN_NETWORK_TYPE; } @@ -222,9 +222,9 @@ ConvertIceCandidatePairEventType(IceCandidatePairEventType type) { case IceCandidatePairEventType::kCheckResponseReceived: return rtclog::IceCandidatePairEvent::CHECK_RESPONSE_RECEIVED; case IceCandidatePairEventType::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); 
return rtclog::IceCandidatePairEvent::CHECK_SENT; } @@ -363,6 +363,12 @@ std::string RtcEventLogEncoderLegacy::Encode(const RtcEvent& event) { static_cast(event); return EncodeVideoSendStreamConfig(rtc_event); } + case RtcEvent::Type::BeginV3Log: + case RtcEvent::Type::EndV3Log: + // These special events are written as part of starting + // and stopping the log, and only as part of version 3 of the format. + RTC_DCHECK_NOTREACHED(); + break; case RtcEvent::Type::RouteChangeEvent: case RtcEvent::Type::RemoteEstimateEvent: case RtcEvent::Type::GenericPacketReceived: @@ -374,7 +380,7 @@ std::string RtcEventLogEncoderLegacy::Encode(const RtcEvent& event) { } int event_type = static_cast(event.GetType()); - RTC_NOTREACHED() << "Unknown event type (" << event_type << ")"; + RTC_DCHECK_NOTREACHED() << "Unknown event type (" << event_type << ")"; return ""; } @@ -674,7 +680,7 @@ std::string RtcEventLogEncoderLegacy::EncodeVideoSendStreamConfig( encoder->set_payload_type(codec.payload_type); if (event.config().codecs.size() > 1) { - RTC_LOG(WARNING) + RTC_LOG(LS_WARNING) << "LogVideoSendStreamConfig currently only supports one " "codec. Logging codec :" << codec.payload_name; diff --git a/logging/rtc_event_log/encoder/rtc_event_log_encoder_new_format.cc b/logging/rtc_event_log/encoder/rtc_event_log_encoder_new_format.cc index 9400c864bf..d88f124f9e 100644 --- a/logging/rtc_event_log/encoder/rtc_event_log_encoder_new_format.cc +++ b/logging/rtc_event_log/encoder/rtc_event_log_encoder_new_format.cc @@ -86,9 +86,9 @@ rtclog2::DelayBasedBweUpdates::DetectorState ConvertToProtoFormat( case BandwidthUsage::kBwOverusing: return rtclog2::DelayBasedBweUpdates::BWE_OVERUSING; case BandwidthUsage::kLast: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog2::DelayBasedBweUpdates::BWE_UNKNOWN_STATE; } @@ -108,7 +108,7 @@ rtclog2::FrameDecodedEvents::Codec ConvertToProtoFormat(VideoCodecType codec) { // This codec type is afaik not used. return rtclog2::FrameDecodedEvents::CODEC_UNKNOWN; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog2::FrameDecodedEvents::CODEC_UNKNOWN; } @@ -122,16 +122,16 @@ rtclog2::BweProbeResultFailure::FailureReason ConvertToProtoFormat( case ProbeFailureReason::kTimeout: return rtclog2::BweProbeResultFailure::TIMEOUT; case ProbeFailureReason::kLast: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog2::BweProbeResultFailure::UNKNOWN; } // Returns true if there are recognized extensions that we should log // and false if there are no extensions or all extensions are types we don't // log. The protobuf representation of the header configs is written to -// |proto_config|. +// `proto_config`. 
bool ConvertToProtoFormat(const std::vector& extensions, rtclog2::RtpHeaderExtensionConfig* proto_config) { size_t unknown_extensions = 0; @@ -167,9 +167,9 @@ rtclog2::DtlsTransportStateEvent::DtlsTransportState ConvertToProtoFormat( case webrtc::DtlsTransportState::kFailed: return rtclog2::DtlsTransportStateEvent::DTLS_TRANSPORT_FAILED; case webrtc::DtlsTransportState::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog2::DtlsTransportStateEvent::UNKNOWN_DTLS_TRANSPORT_STATE; } @@ -185,9 +185,9 @@ ConvertToProtoFormat(IceCandidatePairConfigType type) { case IceCandidatePairConfigType::kSelected: return rtclog2::IceCandidatePairConfig::SELECTED; case IceCandidatePairConfigType::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog2::IceCandidatePairConfig::UNKNOWN_CONFIG_TYPE; } @@ -205,9 +205,9 @@ rtclog2::IceCandidatePairConfig::IceCandidateType ConvertToProtoFormat( case IceCandidateType::kRelay: return rtclog2::IceCandidatePairConfig::RELAY; case IceCandidateType::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog2::IceCandidatePairConfig::UNKNOWN_CANDIDATE_TYPE; } @@ -225,9 +225,9 @@ rtclog2::IceCandidatePairConfig::Protocol ConvertToProtoFormat( case IceCandidatePairProtocol::kTls: return rtclog2::IceCandidatePairConfig::TLS; case IceCandidatePairProtocol::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog2::IceCandidatePairConfig::UNKNOWN_PROTOCOL; } @@ -241,9 +241,9 @@ rtclog2::IceCandidatePairConfig::AddressFamily ConvertToProtoFormat( case IceCandidatePairAddressFamily::kIpv6: return rtclog2::IceCandidatePairConfig::IPV6; case IceCandidatePairAddressFamily::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog2::IceCandidatePairConfig::UNKNOWN_ADDRESS_FAMILY; } @@ -263,9 +263,9 @@ rtclog2::IceCandidatePairConfig::NetworkType ConvertToProtoFormat( case IceCandidateNetworkType::kCellular: return rtclog2::IceCandidatePairConfig::CELLULAR; case IceCandidateNetworkType::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog2::IceCandidatePairConfig::UNKNOWN_NETWORK_TYPE; } @@ -281,14 +281,14 @@ rtclog2::IceCandidatePairEvent::IceCandidatePairEventType ConvertToProtoFormat( case IceCandidatePairEventType::kCheckResponseReceived: return rtclog2::IceCandidatePairEvent::CHECK_RESPONSE_RECEIVED; case IceCandidatePairEventType::kNumValues: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return rtclog2::IceCandidatePairEvent::UNKNOWN_CHECK_TYPE; } -// Copies all RTCP blocks except APP, SDES and unknown from |packet| to -// |buffer|. |buffer| must have space for at least |packet.size()| bytes. +// Copies all RTCP blocks except APP, SDES and unknown from `packet` to +// `buffer`. `buffer` must have space for at least `packet.size()` bytes. 
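+//
+// Usage sketch (illustrative only): the caller owns a scratch buffer of at
+// least packet.size() bytes and logs just the first `written` bytes:
+//   std::vector<uint8_t> buffer(packet.size());
+//   size_t written = RemoveNonAllowlistedRtcpBlocks(packet, buffer.data());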
size_t RemoveNonAllowlistedRtcpBlocks(const rtc::Buffer& packet, uint8_t* buffer) { RTC_DCHECK(buffer != nullptr); @@ -880,6 +880,12 @@ std::string RtcEventLogEncoderNewFormat::EncodeBatch( frames_decoded[rtc_event->ssrc()].emplace_back(rtc_event); break; } + case RtcEvent::Type::BeginV3Log: + case RtcEvent::Type::EndV3Log: + // These special events are written as part of starting + // and stopping the log, and only as part of version 3 of the format. + RTC_DCHECK_NOTREACHED(); + break; } } @@ -956,7 +962,7 @@ void RtcEventLogEncoderNewFormat::EncodeAudioNetworkAdaptation( proto_batch->set_enable_fec(base_event->config().enable_fec.value()); if (base_event->config().enable_dtx.has_value()) proto_batch->set_enable_dtx(base_event->config().enable_dtx.value()); - // Note that |num_channels_deltas| encodes N as N-1, to keep deltas smaller, + // Note that `num_channels_deltas` encodes N as N-1, to keep deltas smaller, // but there's no reason to do the same for the base event's value, since // no bits will be spared. if (base_event->config().num_channels.has_value()) diff --git a/logging/rtc_event_log/encoder/rtc_event_log_encoder_unittest.cc b/logging/rtc_event_log/encoder/rtc_event_log_encoder_unittest.cc index 063d425af5..be9352ab94 100644 --- a/logging/rtc_event_log/encoder/rtc_event_log_encoder_unittest.cc +++ b/logging/rtc_event_log/encoder/rtc_event_log_encoder_unittest.cc @@ -16,6 +16,7 @@ #include "logging/rtc_event_log/encoder/rtc_event_log_encoder_legacy.h" #include "logging/rtc_event_log/encoder/rtc_event_log_encoder_new_format.h" +#include "logging/rtc_event_log/encoder/rtc_event_log_encoder_v3.h" #include "logging/rtc_event_log/events/rtc_event_alr_state.h" #include "logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h" #include "logging/rtc_event_log/events/rtc_event_audio_playout.h" @@ -61,6 +62,9 @@ class RtcEventLogEncoderTest case RtcEventLog::EncodingType::NewFormat: encoder_ = std::make_unique(); break; + case RtcEventLog::EncodingType::ProtoFree: + encoder_ = std::make_unique(); + break; } encoded_ = encoder_->EncodeLogStart(rtc::TimeMillis(), rtc::TimeUTCMillis()); @@ -174,7 +178,7 @@ void RtcEventLogEncoderTest::TestRtpPackets() { extension_map = gen_.NewRtpHeaderExtensionMap(true); } - // Simulate |event_count_| RTP packets, with SSRCs assigned randomly + // Simulate `event_count_` RTP packets, with SSRCs assigned randomly // out of the small pool above. std::map>> events_by_ssrc; for (size_t i = 0; i < event_count_; ++i) { @@ -1285,6 +1289,9 @@ class RtcEventLogEncoderSimpleTest case RtcEventLog::EncodingType::NewFormat: encoder_ = std::make_unique(); break; + case RtcEventLog::EncodingType::ProtoFree: + encoder_ = std::make_unique(); + break; } encoded_ = encoder_->EncodeLogStart(rtc::TimeMillis(), rtc::TimeUTCMillis()); diff --git a/logging/rtc_event_log/encoder/rtc_event_log_encoder_v3.cc b/logging/rtc_event_log/encoder/rtc_event_log_encoder_v3.cc new file mode 100644 index 0000000000..131aae1de8 --- /dev/null +++ b/logging/rtc_event_log/encoder/rtc_event_log_encoder_v3.cc @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "logging/rtc_event_log/encoder/rtc_event_log_encoder_v3.h"
+
+#include <memory>
+#include <string>
+
+#include "absl/types/optional.h"
+#include "logging/rtc_event_log/encoder/rtc_event_log_encoder_common.h"
+#include "logging/rtc_event_log/encoder/var_int.h"
+#include "logging/rtc_event_log/events/rtc_event_alr_state.h"
+#include "logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h"
+#include "logging/rtc_event_log/events/rtc_event_audio_playout.h"
+#include "logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h"
+#include "logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h"
+#include "logging/rtc_event_log/events/rtc_event_begin_log.h"
+#include "logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h"
+#include "logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h"
+#include "logging/rtc_event_log/events/rtc_event_dtls_transport_state.h"
+#include "logging/rtc_event_log/events/rtc_event_dtls_writable_state.h"
+#include "logging/rtc_event_log/events/rtc_event_end_log.h"
+#include "logging/rtc_event_log/events/rtc_event_frame_decoded.h"
+#include "logging/rtc_event_log/events/rtc_event_generic_ack_received.h"
+#include "logging/rtc_event_log/events/rtc_event_generic_packet_received.h"
+#include "logging/rtc_event_log/events/rtc_event_generic_packet_sent.h"
+#include "logging/rtc_event_log/events/rtc_event_ice_candidate_pair.h"
+#include "logging/rtc_event_log/events/rtc_event_ice_candidate_pair_config.h"
+#include "logging/rtc_event_log/events/rtc_event_probe_cluster_created.h"
+#include "logging/rtc_event_log/events/rtc_event_probe_result_failure.h"
+#include "logging/rtc_event_log/events/rtc_event_probe_result_success.h"
+#include "logging/rtc_event_log/events/rtc_event_remote_estimate.h"
+#include "logging/rtc_event_log/events/rtc_event_route_change.h"
+#include "logging/rtc_event_log/events/rtc_event_rtcp_packet_incoming.h"
+#include "logging/rtc_event_log/events/rtc_event_rtcp_packet_outgoing.h"
+#include "logging/rtc_event_log/events/rtc_event_rtp_packet_incoming.h"
+#include "logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h"
+#include "logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h"
+#include "logging/rtc_event_log/events/rtc_event_video_send_stream_config.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+std::string RtcEventLogEncoderV3::EncodeLogStart(int64_t timestamp_us,
+                                                 int64_t utc_time_us) {
+  std::unique_ptr<RtcEventBeginLog> begin_log =
+      std::make_unique<RtcEventBeginLog>(Timestamp::Micros(timestamp_us),
+                                         Timestamp::Micros(utc_time_us));
+  std::vector<const RtcEvent*> batch;
+  batch.push_back(begin_log.get());
+
+  std::string encoded_event = RtcEventBeginLog::Encode(batch);
+
+  return encoded_event;
+}
+
+std::string RtcEventLogEncoderV3::EncodeLogEnd(int64_t timestamp_us) {
+  std::unique_ptr<RtcEventEndLog> end_log =
+      std::make_unique<RtcEventEndLog>(Timestamp::Micros(timestamp_us));
+  std::vector<const RtcEvent*> batch;
+  batch.push_back(end_log.get());
+
+  std::string encoded_event = RtcEventEndLog::Encode(batch);
+
+  return encoded_event;
+}
+
+RtcEventLogEncoderV3::RtcEventLogEncoderV3() {
+  encoders_[RtcEvent::Type::AlrStateEvent] = RtcEventAlrState::Encode;
+  encoders_[RtcEvent::Type::AudioNetworkAdaptation] =
+      RtcEventAudioNetworkAdaptation::Encode;
+  encoders_[RtcEvent::Type::AudioPlayout] = RtcEventAudioPlayout::Encode;
+  encoders_[RtcEvent::Type::AudioReceiveStreamConfig] =
+      RtcEventAudioReceiveStreamConfig::Encode;
+  encoders_[RtcEvent::Type::AudioSendStreamConfig] =
+      RtcEventAudioSendStreamConfig::Encode;
+
+  encoders_[RtcEvent::Type::BweUpdateDelayBased] =
+      RtcEventBweUpdateDelayBased::Encode;
+  encoders_[RtcEvent::Type::BweUpdateLossBased] =
+      RtcEventBweUpdateLossBased::Encode;
+  encoders_[RtcEvent::Type::DtlsTransportState] =
+      RtcEventDtlsTransportState::Encode;
+  encoders_[RtcEvent::Type::DtlsWritableState] =
+      RtcEventDtlsWritableState::Encode;
+  encoders_[RtcEvent::Type::FrameDecoded] = RtcEventFrameDecoded::Encode;
+  encoders_[RtcEvent::Type::GenericAckReceived] =
+      RtcEventGenericAckReceived::Encode;
+  encoders_[RtcEvent::Type::GenericPacketReceived] =
+      RtcEventGenericPacketReceived::Encode;
+  encoders_[RtcEvent::Type::GenericPacketSent] =
+      RtcEventGenericPacketSent::Encode;
+  encoders_[RtcEvent::Type::IceCandidatePairConfig] =
+      RtcEventIceCandidatePairConfig::Encode;
+  encoders_[RtcEvent::Type::IceCandidatePairEvent] =
+      RtcEventIceCandidatePair::Encode;
+  encoders_[RtcEvent::Type::ProbeClusterCreated] =
+      RtcEventProbeClusterCreated::Encode;
+  encoders_[RtcEvent::Type::ProbeResultFailure] =
+      RtcEventProbeResultFailure::Encode;
+  encoders_[RtcEvent::Type::ProbeResultSuccess] =
+      RtcEventProbeResultSuccess::Encode;
+  encoders_[RtcEvent::Type::RemoteEstimateEvent] =
+      RtcEventRemoteEstimate::Encode;
+  encoders_[RtcEvent::Type::RouteChangeEvent] = RtcEventRouteChange::Encode;
+  encoders_[RtcEvent::Type::RtcpPacketIncoming] =
+      RtcEventRtcpPacketIncoming::Encode;
+  encoders_[RtcEvent::Type::RtcpPacketOutgoing] =
+      RtcEventRtcpPacketOutgoing::Encode;
+  encoders_[RtcEvent::Type::RtpPacketIncoming] =
+      RtcEventRtpPacketIncoming::Encode;
+  encoders_[RtcEvent::Type::RtpPacketOutgoing] =
+      RtcEventRtpPacketOutgoing::Encode;
+  encoders_[RtcEvent::Type::VideoReceiveStreamConfig] =
+      RtcEventVideoReceiveStreamConfig::Encode;
+  encoders_[RtcEvent::Type::VideoSendStreamConfig] =
+      RtcEventVideoSendStreamConfig::Encode;
+}
+
+std::string RtcEventLogEncoderV3::EncodeBatch(
+    std::deque<std::unique_ptr<RtcEvent>>::const_iterator begin,
+    std::deque<std::unique_ptr<RtcEvent>>::const_iterator end) {
+  struct EventGroupKey {
+    // Events are grouped by event type. For compression efficiency,
+    // events can optionally have a secondary key, in most cases the
+    // SSRC.
+    RtcEvent::Type type;
+    uint32_t secondary_group_key;
+
+    bool operator<(EventGroupKey other) const {
+      return type < other.type ||
+             (type == other.type &&
+              secondary_group_key < other.secondary_group_key);
+    }
+  };
+
+  std::map<EventGroupKey, std::vector<const RtcEvent*>> event_groups;
+
+  for (auto it = begin; it != end; ++it) {
+    event_groups[{(*it)->GetType(), (*it)->GetGroupKey()}].push_back(
+        it->get());
+  }
+
+  std::string encoded_output;
+  for (auto& kv : event_groups) {
+    auto it = encoders_.find(kv.first.type);
+    RTC_DCHECK(it != encoders_.end());
+    if (it != encoders_.end()) {
+      auto& encoder = it->second;
+      // TODO(terelius): Use some "string builder" or preallocate?
+      encoded_output += encoder(kv.second);
+    }
+  }
+
+  return encoded_output;
+}
+
+}  // namespace webrtc
diff --git a/logging/rtc_event_log/encoder/rtc_event_log_encoder_v3.h b/logging/rtc_event_log/encoder/rtc_event_log_encoder_v3.h
new file mode 100644
index 0000000000..cb796ec562
--- /dev/null
+++ b/logging/rtc_event_log/encoder/rtc_event_log_encoder_v3.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LOGGING_RTC_EVENT_LOG_ENCODER_RTC_EVENT_LOG_ENCODER_V3_H_
+#define LOGGING_RTC_EVENT_LOG_ENCODER_RTC_EVENT_LOG_ENCODER_V3_H_
+
+#include <deque>
+#include <functional>
+#include <map>
+#include <string>
+
+#include "api/array_view.h"
+#include "logging/rtc_event_log/encoder/rtc_event_log_encoder.h"
+#include "logging/rtc_event_log/events/rtc_event_definition.h"
+
+namespace webrtc {
+
+class RtcEventLogEncoderV3 final : public RtcEventLogEncoder {
+ public:
+  RtcEventLogEncoderV3();
+  ~RtcEventLogEncoderV3() override = default;
+
+  std::string EncodeBatch(
+      std::deque<std::unique_ptr<RtcEvent>>::const_iterator begin,
+      std::deque<std::unique_ptr<RtcEvent>>::const_iterator end) override;
+
+  std::string EncodeLogStart(int64_t timestamp_us,
+                             int64_t utc_time_us) override;
+  std::string EncodeLogEnd(int64_t timestamp_us) override;
+
+ private:
+  std::map<RtcEvent::Type,
+           std::function<std::string(rtc::ArrayView<const RtcEvent*>)>>
+      encoders_;
+};
+
+}  // namespace webrtc
+
+#endif  // LOGGING_RTC_EVENT_LOG_ENCODER_RTC_EVENT_LOG_ENCODER_V3_H_
diff --git a/logging/rtc_event_log/encoder/var_int.cc b/logging/rtc_event_log/encoder/var_int.cc
index f2819c0c73..a84a233d6b 100644
--- a/logging/rtc_event_log/encoder/var_int.cc
+++ b/logging/rtc_event_log/encoder/var_int.cc
@@ -10,6 +10,7 @@
 
 #include "logging/rtc_event_log/encoder/var_int.h"
 
+#include "rtc_base/bitstream_reader.h"
 #include "rtc_base/checks.h"
 
 // TODO(eladalon): Add unit tests.
@@ -58,23 +59,18 @@ std::pair<size_t, uint64_t> DecodeVarInt(absl::string_view input,
 
 // There is some code duplication between the flavors of this function.
 // For performance's sake, it's best to just keep it.
-size_t DecodeVarInt(rtc::BitBuffer* input, uint64_t* output) {
-  RTC_DCHECK(output);
-
+uint64_t DecodeVarInt(BitstreamReader& input) {
   uint64_t decoded = 0;
   for (size_t i = 0; i < kMaxVarIntLengthBytes; ++i) {
-    uint8_t byte;
-    if (!input->ReadUInt8(byte)) {
-      return 0;
-    }
+    uint8_t byte = input.Read<uint8_t>();
     decoded +=
        (static_cast<uint64_t>(byte & 0x7f) << static_cast<uint64_t>(7 * i));
     if (!(byte & 0x80)) {
-      *output = decoded;
-      return i + 1;
+      return decoded;
    }
  }
+  input.Invalidate();
   return 0;
 }
diff --git a/logging/rtc_event_log/encoder/var_int.h b/logging/rtc_event_log/encoder/var_int.h
index dbe1f1103f..4624e046ba 100644
--- a/logging/rtc_event_log/encoder/var_int.h
+++ b/logging/rtc_event_log/encoder/var_int.h
@@ -18,7 +18,7 @@
 #include <utility>
 
 #include "absl/strings/string_view.h"
-#include "rtc_base/bit_buffer.h"
+#include "rtc_base/bitstream_reader.h"
 
 namespace webrtc {
 
@@ -39,13 +39,11 @@ std::string EncodeVarInt(uint64_t input);
 std::pair<size_t, uint64_t> DecodeVarInt(absl::string_view input,
                                          uint64_t* output);
 
-// Same as other version, but uses a rtc::BitBuffer for input.
-// If decoding is successful, a non-zero number is returned, indicating the
-// number of bytes read from `input`, and the decoded varint is written
-// into `output`.
-// If not successful, 0 is returned, and `output` is not modified.
-// Some bits may be consumed even if a varint fails to be read.
-size_t DecodeVarInt(rtc::BitBuffer* input, uint64_t* output);
+// Same as the other version, but uses a BitstreamReader for input.
+// If decoding is successful, returns the decoded varint.
+// If not successful, the `input` reader is set to the failure state and the
+// return value is unspecified.
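+//
+// Worked example (illustrative): 300 = 0b1'0010'1100 encodes as the two
+// bytes 0xAC 0x02: the low 7 bits (0101100) with the continuation bit set,
+// then the remaining bits (10) with the continuation bit clear.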
+uint64_t DecodeVarInt(BitstreamReader& input);
 
 }  // namespace webrtc
diff --git a/logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.cc b/logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.cc
new file mode 100644
index 0000000000..0c93e6226d
--- /dev/null
+++ b/logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.cc
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.h"
+
+#include <algorithm>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "logging/rtc_event_log/events/rtc_event_field_extraction.h"
+#include "rtc_base/logging.h"
+
+using webrtc_event_logging::MaxUnsignedValueOfBitWidth;
+using webrtc_event_logging::SignedBitWidth;
+using webrtc_event_logging::UnsignedBitWidth;
+using webrtc_event_logging::UnsignedDelta;
+
+namespace webrtc {
+
+FixedLengthEncodingParametersV3
+FixedLengthEncodingParametersV3::CalculateParameters(
+    uint64_t base,
+    const rtc::ArrayView<const uint64_t> values,
+    uint64_t value_bit_width,
+    bool values_optional) {
+  // As a special case, if all of the elements are identical to the base
+  // we just encode the base value with a special delta header.
+  if (std::all_of(values.cbegin(), values.cend(),
+                  [base](uint64_t val) { return val == base; })) {
+    // Delta header with signed=true and delta_bitwidth=64
+    return FixedLengthEncodingParametersV3(/*delta_bit_width=*/64,
+                                           /*signed_deltas=*/true,
+                                           values_optional, value_bit_width);
+  }
+
+  const uint64_t bit_mask = MaxUnsignedValueOfBitWidth(value_bit_width);
+
+  // Calculate the bitwidth required to encode all deltas when using an
+  // unsigned or signed representation, respectively. For the unsigned
+  // representation, we just track the largest delta. For the signed
+  // representation, we have two possibilities for each delta; either
+  // going "forward" (i.e. current - previous) or "backwards"
+  // (i.e. previous - current) where both values are calculated with
+  // wrap around. We then track the largest positive and negative
+  // magnitude across the batch, assuming that we choose the smaller
+  // delta for each element.
+  uint64_t max_unsigned_delta = 0;
+  uint64_t max_positive_signed_delta = 0;
+  uint64_t min_negative_signed_delta = 0;
+  uint64_t prev = base;
+  for (uint64_t current : values) {
+    uint64_t positive_delta = UnsignedDelta(prev, current, bit_mask);
+    uint64_t negative_delta = UnsignedDelta(current, prev, bit_mask);
+
+    max_unsigned_delta = std::max(max_unsigned_delta, positive_delta);
+
+    if (positive_delta < negative_delta) {
+      max_positive_signed_delta =
+          std::max(max_positive_signed_delta, positive_delta);
+    } else {
+      min_negative_signed_delta =
+          std::max(min_negative_signed_delta, negative_delta);
+    }
+
+    prev = current;
+  }
+
+  // We now know the largest unsigned delta and the largest magnitudes of
+  // positive and negative signed deltas. Get the bitwidths required for
+  // each of the two encodings.
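+  // Worked example (illustrative): with value_bit_width = 8, base = 250 and
+  // the single value 2, the forward delta is (2 - 250) & 0xFF = 8 and the
+  // backward delta is (250 - 2) & 0xFF = 248. Unsigned encoding then needs
+  // UnsignedBitWidth(8) = 4 bits, while signed encoding must also spend a
+  // sign bit, so the unsigned representation wins here.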
+  const uint64_t unsigned_delta_bit_width =
+      UnsignedBitWidth(max_unsigned_delta);
+  const uint64_t signed_delta_bit_width =
+      SignedBitWidth(max_positive_signed_delta, min_negative_signed_delta);
+
+  // Note: Preference for unsigned if the two have the same width (efficiency).
+  bool use_signed_deltas = signed_delta_bit_width < unsigned_delta_bit_width;
+  uint64_t delta_bit_width =
+      use_signed_deltas ? signed_delta_bit_width : unsigned_delta_bit_width;
+
+  // use_signed_deltas && delta_bit_width==64 is reserved for "all values
+  // equal".
+  RTC_DCHECK(!use_signed_deltas || delta_bit_width < 64);
+
+  RTC_DCHECK(ValidParameters(delta_bit_width, use_signed_deltas,
+                             values_optional, value_bit_width));
+  return FixedLengthEncodingParametersV3(delta_bit_width, use_signed_deltas,
+                                         values_optional, value_bit_width);
+}
+
+uint64_t FixedLengthEncodingParametersV3::DeltaHeaderAsInt() const {
+  uint64_t header = delta_bit_width_ - 1;
+  RTC_CHECK_LT(header, 1u << 6);
+  if (signed_deltas_) {
+    header += 1u << 6;
+  }
+  RTC_CHECK_LT(header, 1u << 7);
+  if (values_optional_) {
+    header += 1u << 7;
+  }
+  return header;
+}
+
+absl::optional<FixedLengthEncodingParametersV3>
+FixedLengthEncodingParametersV3::ParseDeltaHeader(uint64_t header,
+                                                  uint64_t value_bit_width) {
+  uint64_t delta_bit_width = (header & ((1u << 6) - 1)) + 1;
+  bool signed_deltas = header & (1u << 6);
+  bool values_optional = header & (1u << 7);
+
+  if (header >= (1u << 8)) {
+    RTC_LOG(LS_ERROR) << "Failed to parse delta header; unread bits remaining.";
+    return absl::nullopt;
+  }
+
+  if (!ValidParameters(delta_bit_width, signed_deltas, values_optional,
+                       value_bit_width)) {
+    RTC_LOG(LS_ERROR) << "Failed to parse delta header. Invalid combination of "
+                         "values: delta_bit_width="
+                      << delta_bit_width << " signed_deltas=" << signed_deltas
+                      << " values_optional=" << values_optional
+                      << " value_bit_width=" << value_bit_width;
+    return absl::nullopt;
+  }
+
+  return FixedLengthEncodingParametersV3(delta_bit_width, signed_deltas,
+                                         values_optional, value_bit_width);
+}
+
+}  // namespace webrtc
diff --git a/logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.h b/logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.h
new file mode 100644
index 0000000000..666fae1c63
--- /dev/null
+++ b/logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LOGGING_RTC_EVENT_LOG_EVENTS_FIXED_LENGTH_ENCODING_PARAMETERS_V3_H_
+#define LOGGING_RTC_EVENT_LOG_EVENTS_FIXED_LENGTH_ENCODING_PARAMETERS_V3_H_
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "logging/rtc_event_log/events/rtc_event_field_extraction.h"
+
+namespace webrtc {
+
+// Parameters for fixed-size delta-encoding/decoding.
+// These are tailored for the sequence which will be encoded (e.g. widths).
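+//
+// Header layout sketch (as produced by DeltaHeaderAsInt() in the .cc file):
+// bits 0-5 hold delta_bit_width - 1, bit 6 holds signed_deltas, and bit 7
+// holds values_optional. For example, delta_bit_width = 16 with unsigned
+// deltas and optional values yields header = 15 + (1 << 7) = 143.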
+class FixedLengthEncodingParametersV3 final {
+ public:
+  static bool ValidParameters(uint64_t delta_bit_width,
+                              bool signed_deltas,
+                              bool values_optional,
+                              uint64_t value_bit_width) {
+    return (1 <= delta_bit_width && delta_bit_width <= 64 &&
+            1 <= value_bit_width && value_bit_width <= 64 &&
+            (delta_bit_width <= value_bit_width ||
+             (signed_deltas && delta_bit_width == 64)));
+  }
+
+  static FixedLengthEncodingParametersV3 CalculateParameters(
+      uint64_t base,
+      rtc::ArrayView<const uint64_t> values,
+      uint64_t value_bit_width,
+      bool values_optional);
+  static absl::optional<FixedLengthEncodingParametersV3> ParseDeltaHeader(
+      uint64_t header,
+      uint64_t value_bit_width);
+
+  uint64_t DeltaHeaderAsInt() const;
+
+  // Number of bits necessary to hold the widest(*) of the deltas between the
+  // values in the sequence.
+  // (*) - Widest might not be the largest, if signed deltas are used.
+  uint64_t delta_bit_width() const { return delta_bit_width_; }
+
+  // Whether deltas are signed.
+  bool signed_deltas() const { return signed_deltas_; }
+
+  // Whether the values of the sequence are optional. That is, it may be
+  // that some of them do not have a value (not even a sentinel value indicating
+  // invalidity).
+  bool values_optional() const { return values_optional_; }
+
+  // Whether all values are equal. 64-bit signed deltas are assumed to not
+  // occur, since those could equally well be represented using 64 bit unsigned
+  // deltas.
+  bool values_equal() const {
+    return delta_bit_width() == 64 && signed_deltas();
+  }
+
+  // Number of bits necessary to hold the largest value in the sequence.
+  uint64_t value_bit_width() const { return value_bit_width_; }
+
+  // Masks where only the bits relevant to the deltas/values are turned on.
+  uint64_t delta_mask() const { return delta_mask_; }
+  uint64_t value_mask() const { return value_mask_; }
+
+ private:
+  FixedLengthEncodingParametersV3(uint64_t delta_bit_width,
+                                  bool signed_deltas,
+                                  bool values_optional,
+                                  uint64_t value_bit_width)
+      : delta_bit_width_(delta_bit_width),
+        signed_deltas_(signed_deltas),
+        values_optional_(values_optional),
+        value_bit_width_(value_bit_width),
+        delta_mask_(
+            webrtc_event_logging::MaxUnsignedValueOfBitWidth(delta_bit_width_)),
+        value_mask_(webrtc_event_logging::MaxUnsignedValueOfBitWidth(
+            value_bit_width_)) {}
+
+  uint64_t delta_bit_width_;
+  bool signed_deltas_;
+  bool values_optional_;
+  uint64_t value_bit_width_;
+
+  uint64_t delta_mask_;
+  uint64_t value_mask_;
+};
+
+}  // namespace webrtc
+#endif  // LOGGING_RTC_EVENT_LOG_EVENTS_FIXED_LENGTH_ENCODING_PARAMETERS_V3_H_
diff --git a/logging/rtc_event_log/events/logged_rtp_rtcp.h b/logging/rtc_event_log/events/logged_rtp_rtcp.h
new file mode 100644
index 0000000000..179d70dd63
--- /dev/null
+++ b/logging/rtc_event_log/events/logged_rtp_rtcp.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2022 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LOGGING_RTC_EVENT_LOG_EVENTS_LOGGED_RTP_RTCP_H_
+#define LOGGING_RTC_EVENT_LOG_EVENTS_LOGGED_RTP_RTCP_H_
+
+#include <string.h>
+#include <vector>
+
+#include "api/rtp_headers.h"
+#include "api/units/timestamp.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/fir.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/loss_notification.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/pli.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/remb.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+
+namespace webrtc {
+
+struct LoggedRtpPacket {
+  LoggedRtpPacket(Timestamp timestamp,
+                  RTPHeader header,
+                  size_t header_length,
+                  size_t total_length)
+      : timestamp(timestamp),
+        header(header),
+        header_length(header_length),
+        total_length(total_length) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp;
+  // TODO(terelius): This allocates space for 15 CSRCs even if none are used.
+  RTPHeader header;
+  size_t header_length;
+  size_t total_length;
+};
+
+struct LoggedRtpPacketIncoming {
+  LoggedRtpPacketIncoming(Timestamp timestamp,
+                          RTPHeader header,
+                          size_t header_length,
+                          size_t total_length)
+      : rtp(timestamp, header, header_length, total_length) {}
+  int64_t log_time_us() const { return rtp.timestamp.us(); }
+  int64_t log_time_ms() const { return rtp.timestamp.ms(); }
+
+  LoggedRtpPacket rtp;
+};
+
+struct LoggedRtpPacketOutgoing {
+  LoggedRtpPacketOutgoing(Timestamp timestamp,
+                          RTPHeader header,
+                          size_t header_length,
+                          size_t total_length)
+      : rtp(timestamp, header, header_length, total_length) {}
+  int64_t log_time_us() const { return rtp.timestamp.us(); }
+  int64_t log_time_ms() const { return rtp.timestamp.ms(); }
+
+  LoggedRtpPacket rtp;
+};
+
+struct LoggedRtcpPacket {
+  LoggedRtcpPacket(Timestamp timestamp, const std::vector<uint8_t>& packet)
+      : timestamp(timestamp), raw_data(packet) {}
+  LoggedRtcpPacket(Timestamp timestamp, const std::string& packet)
+      : timestamp(timestamp), raw_data(packet.size()) {
+    memcpy(raw_data.data(), packet.data(), packet.size());
+  }
+
+  LoggedRtcpPacket(const LoggedRtcpPacket& rhs) = default;
+
+  ~LoggedRtcpPacket() = default;
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp;
+  std::vector<uint8_t> raw_data;
+};
+
+struct LoggedRtcpPacketIncoming {
+  LoggedRtcpPacketIncoming(Timestamp timestamp,
+                           const std::vector<uint8_t>& packet)
+      : rtcp(timestamp, packet) {}
+  LoggedRtcpPacketIncoming(Timestamp timestamp, const std::string& packet)
+      : rtcp(timestamp, packet) {}
+
+  int64_t log_time_us() const { return rtcp.timestamp.us(); }
+  int64_t log_time_ms() const { return rtcp.timestamp.ms(); }
+
+  LoggedRtcpPacket rtcp;
+};
+
+struct LoggedRtcpPacketOutgoing {
+  LoggedRtcpPacketOutgoing(Timestamp timestamp,
+                           const std::vector<uint8_t>& packet)
+      : rtcp(timestamp, packet) {}
+  LoggedRtcpPacketOutgoing(Timestamp timestamp, const std::string& packet)
+      : rtcp(timestamp, packet) {}
+
+  int64_t log_time_us() const { return rtcp.timestamp.us(); }
+  int64_t log_time_ms() const { return rtcp.timestamp.ms(); }
+
+  LoggedRtcpPacket rtcp;
+};
+
+struct LoggedRtcpPacketReceiverReport {
+ 
LoggedRtcpPacketReceiverReport() = default; + LoggedRtcpPacketReceiverReport(Timestamp timestamp, + const rtcp::ReceiverReport& rr) + : timestamp(timestamp), rr(rr) {} + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + rtcp::ReceiverReport rr; +}; + +struct LoggedRtcpPacketSenderReport { + LoggedRtcpPacketSenderReport() = default; + LoggedRtcpPacketSenderReport(Timestamp timestamp, + const rtcp::SenderReport& sr) + : timestamp(timestamp), sr(sr) {} + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + rtcp::SenderReport sr; +}; + +struct LoggedRtcpPacketExtendedReports { + LoggedRtcpPacketExtendedReports() = default; + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + rtcp::ExtendedReports xr; +}; + +struct LoggedRtcpPacketRemb { + LoggedRtcpPacketRemb() = default; + LoggedRtcpPacketRemb(Timestamp timestamp, const rtcp::Remb& remb) + : timestamp(timestamp), remb(remb) {} + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + rtcp::Remb remb; +}; + +struct LoggedRtcpPacketNack { + LoggedRtcpPacketNack() = default; + LoggedRtcpPacketNack(Timestamp timestamp, const rtcp::Nack& nack) + : timestamp(timestamp), nack(nack) {} + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + rtcp::Nack nack; +}; + +struct LoggedRtcpPacketFir { + LoggedRtcpPacketFir() = default; + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + rtcp::Fir fir; +}; + +struct LoggedRtcpPacketPli { + LoggedRtcpPacketPli() = default; + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + rtcp::Pli pli; +}; + +struct LoggedRtcpPacketTransportFeedback { + LoggedRtcpPacketTransportFeedback() + : transport_feedback(/*include_timestamps=*/true, /*include_lost*/ true) { + } + LoggedRtcpPacketTransportFeedback( + Timestamp timestamp, + const rtcp::TransportFeedback& transport_feedback) + : timestamp(timestamp), transport_feedback(transport_feedback) {} + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + rtcp::TransportFeedback transport_feedback; +}; + +struct LoggedRtcpPacketLossNotification { + LoggedRtcpPacketLossNotification() = default; + LoggedRtcpPacketLossNotification( + Timestamp timestamp, + const rtcp::LossNotification& loss_notification) + : timestamp(timestamp), loss_notification(loss_notification) {} + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + rtcp::LossNotification loss_notification; +}; + +struct LoggedRtcpPacketBye { + LoggedRtcpPacketBye() = default; + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return 
+
+struct LoggedRtcpPacketBye {
+  LoggedRtcpPacketBye() = default;
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  rtcp::Bye bye;
+};
+
+}  // namespace webrtc
+
+#endif  // LOGGING_RTC_EVENT_LOG_EVENTS_LOGGED_RTP_RTCP_H_
diff --git a/logging/rtc_event_log/events/rtc_event_alr_state.cc b/logging/rtc_event_log/events/rtc_event_alr_state.cc
index 3c307b9ca0..25941eb16b 100644
--- a/logging/rtc_event_log/events/rtc_event_alr_state.cc
+++ b/logging/rtc_event_log/events/rtc_event_alr_state.cc
@@ -13,6 +13,9 @@
 #include "absl/memory/memory.h"
 
 namespace webrtc {
+constexpr RtcEvent::Type RtcEventAlrState::kType;
+constexpr RtcEventDefinition<RtcEventAlrState, LoggedAlrStateEvent, bool>
+    RtcEventAlrState::definition_;
 
 RtcEventAlrState::RtcEventAlrState(bool in_alr) : in_alr_(in_alr) {}
 
@@ -25,4 +28,11 @@ std::unique_ptr<RtcEvent> RtcEventAlrState::Copy() const {
   return absl::WrapUnique(new RtcEventAlrState(*this));
 }
 
+RtcEventLogParseStatus RtcEventAlrState::Parse(
+    absl::string_view s,
+    bool batched,
+    std::vector<LoggedAlrStateEvent>& output) {
+  return RtcEventAlrState::definition_.ParseBatch(s, batched, output);
+}
+
 }  // namespace webrtc
diff --git a/logging/rtc_event_log/events/rtc_event_alr_state.h b/logging/rtc_event_log/events/rtc_event_alr_state.h
index 74d66015ef..44e68a680e 100644
--- a/logging/rtc_event_log/events/rtc_event_alr_state.h
+++ b/logging/rtc_event_log/events/rtc_event_alr_state.h
@@ -12,12 +12,31 @@
 #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_ALR_STATE_H_
 
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_definition.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
+#include "logging/rtc_event_log/events/rtc_event_field_extraction.h"
 
 namespace webrtc {
 
+struct LoggedAlrStateEvent {
+  LoggedAlrStateEvent() = default;
+  LoggedAlrStateEvent(Timestamp timestamp, bool in_alr)
+      : timestamp(timestamp), in_alr(in_alr) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  bool in_alr;
+};
+
 class RtcEventAlrState final : public RtcEvent {
  public:
   static constexpr Type kType = Type::AlrStateEvent;
@@ -32,22 +51,26 @@ class RtcEventAlrState final : public RtcEvent {
 
   bool in_alr() const { return in_alr_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    return RtcEventAlrState::definition_.EncodeBatch(batch);
+  }
+
+  static RtcEventLogParseStatus Parse(absl::string_view s,
+                                      bool batched,
+                                      std::vector<LoggedAlrStateEvent>& output);
+
  private:
   RtcEventAlrState(const RtcEventAlrState& other);
 
   const bool in_alr_;
-};
-
-struct LoggedAlrStateEvent {
-  LoggedAlrStateEvent() = default;
-  LoggedAlrStateEvent(Timestamp timestamp, bool in_alr)
-      : timestamp(timestamp), in_alr(in_alr) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
 
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  bool in_alr;
+  static constexpr RtcEventDefinition<RtcEventAlrState,
+                                      LoggedAlrStateEvent,
+                                      bool>
+      definition_{{"AlrState", RtcEventAlrState::kType},
+                  {&RtcEventAlrState::in_alr_,
+                   &LoggedAlrStateEvent::in_alr,
+                   {"in_alr", /*id=*/1, FieldType::kFixed8, /*width=*/1}}};
 };
 
 }  // namespace webrtc
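The ALR files above establish the pattern the rest of this patch repeats: the event class owns a constexpr `definition_`, and the static Encode()/Parse() methods delegate to it. A minimal usage sketch (illustrative, not part of the patch):

    // Encode two ALR transitions as a single batch.
    RtcEventAlrState enter_alr(/*in_alr=*/true);
    RtcEventAlrState leave_alr(/*in_alr=*/false);
    const RtcEvent* batch[] = {&enter_alr, &leave_alr};
    std::string encoded = RtcEventAlrState::Encode(batch);
    // Note: AsString() (defined further down in this patch) prepends an event
    // tag and a payload size; the full-log parser is expected to strip those
    // before handing the payload to Parse(payload, /*batched=*/true, output).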
diff --git a/logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h b/logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h
index aeeb28e218..b9e919c2e8 100644
--- a/logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h
+++ b/logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h
@@ -12,13 +12,30 @@
 #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_AUDIO_NETWORK_ADAPTATION_H_
 
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
 
 namespace webrtc {
 
+struct LoggedAudioNetworkAdaptationEvent {
+  LoggedAudioNetworkAdaptationEvent() = default;
+  LoggedAudioNetworkAdaptationEvent(Timestamp timestamp,
+                                    const AudioEncoderRuntimeConfig& config)
+      : timestamp(timestamp), config(config) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  AudioEncoderRuntimeConfig config;
+};
+
 struct AudioEncoderRuntimeConfig;
 
 class RtcEventAudioNetworkAdaptation final : public RtcEvent {
@@ -36,25 +53,25 @@ class RtcEventAudioNetworkAdaptation final : public RtcEvent {
 
   const AudioEncoderRuntimeConfig& config() const { return *config_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedAudioNetworkAdaptationEvent>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__,
+                                         __LINE__);
+  }
+
  private:
   RtcEventAudioNetworkAdaptation(const RtcEventAudioNetworkAdaptation& other);
 
   const std::unique_ptr<AudioEncoderRuntimeConfig> config_;
 };
 
-struct LoggedAudioNetworkAdaptationEvent {
-  LoggedAudioNetworkAdaptationEvent() = default;
-  LoggedAudioNetworkAdaptationEvent(Timestamp timestamp,
-                                    const AudioEncoderRuntimeConfig& config)
-      : timestamp(timestamp), config(config) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  AudioEncoderRuntimeConfig config;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_AUDIO_NETWORK_ADAPTATION_H_
diff --git a/logging/rtc_event_log/events/rtc_event_audio_playout.cc b/logging/rtc_event_log/events/rtc_event_audio_playout.cc
index dae61c4df3..21a3f9266c 100644
--- a/logging/rtc_event_log/events/rtc_event_audio_playout.cc
+++ b/logging/rtc_event_log/events/rtc_event_audio_playout.cc
@@ -14,6 +14,11 @@
 
 namespace webrtc {
 
+constexpr RtcEventDefinition<RtcEventAudioPlayout,
+                             LoggedAudioPlayoutEvent,
+                             uint32_t>
+    RtcEventAudioPlayout::definition_;
+
 RtcEventAudioPlayout::RtcEventAudioPlayout(uint32_t ssrc) : ssrc_(ssrc) {}
 
 RtcEventAudioPlayout::RtcEventAudioPlayout(const RtcEventAudioPlayout& other)
diff --git a/logging/rtc_event_log/events/rtc_event_audio_playout.h b/logging/rtc_event_log/events/rtc_event_audio_playout.h
index 00d07a65bf..3788324d15 100644
--- a/logging/rtc_event_log/events/rtc_event_audio_playout.h
+++ b/logging/rtc_event_log/events/rtc_event_audio_playout.h
@@ -13,13 +13,30 @@
 
 #include <stdint.h>
 
+#include <map>
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_definition.h"
 
 namespace webrtc {
 
+struct LoggedAudioPlayoutEvent {
+  LoggedAudioPlayoutEvent() = default;
+  LoggedAudioPlayoutEvent(Timestamp timestamp, uint32_t ssrc)
+      : timestamp(timestamp), ssrc(ssrc) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  uint32_t ssrc;
+};
+
 class RtcEventAudioPlayout final : public RtcEvent {
  public:
   static constexpr Type kType = Type::AudioPlayout;
@@ -34,22 +51,35 @@ class RtcEventAudioPlayout final : public RtcEvent {
 
   uint32_t ssrc() const { return ssrc_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    return RtcEventAudioPlayout::definition_.EncodeBatch(batch);
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::map<uint32_t, std::vector<LoggedAudioPlayoutEvent>>& output) {
+    std::vector<LoggedAudioPlayoutEvent> temp_output;
+    auto status = RtcEventAudioPlayout::definition_.ParseBatch(
+        encoded_bytes, batched, temp_output);
+    for (const LoggedAudioPlayoutEvent& event : temp_output) {
+      output[event.ssrc].push_back(event);
+    }
+    return status;
+  }
+
  private:
   RtcEventAudioPlayout(const RtcEventAudioPlayout& other);
 
   const uint32_t ssrc_;
-};
-
-struct LoggedAudioPlayoutEvent {
-  LoggedAudioPlayoutEvent() = default;
-  LoggedAudioPlayoutEvent(Timestamp timestamp, uint32_t ssrc)
-      : timestamp(timestamp), ssrc(ssrc) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
 
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  uint32_t ssrc;
+  static constexpr RtcEventDefinition<RtcEventAudioPlayout,
+                                      LoggedAudioPlayoutEvent,
+                                      uint32_t>
+      definition_{{"AudioPlayout", RtcEventAudioPlayout::kType},
+                  {&RtcEventAudioPlayout::ssrc_,
+                   &LoggedAudioPlayoutEvent::ssrc,
+                   {"ssrc", /*id=*/1, FieldType::kFixed32, /*width=*/32}}};
 };
 
 }  // namespace webrtc
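Unlike the other events in this patch, the playout Parse() returns its results keyed by SSRC, since a single log interleaves playout events from every audio stream. Usage sketch (illustrative; `payload` stands for the event payload handed over by the stream parser):

    std::map<uint32_t, std::vector<LoggedAudioPlayoutEvent>> playout_events;
    RtcEventLogParseStatus status =
        RtcEventAudioPlayout::Parse(payload, /*batched=*/true, playout_events);
    if (status.ok()) {
      // playout_events[ssrc] holds that stream's events in log order.
    }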
diff --git a/logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h b/logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h
index ccf76025e6..ac8813c937 100644
--- a/logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h
+++ b/logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h
@@ -12,13 +12,29 @@
 #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_AUDIO_RECEIVE_STREAM_CONFIG_H_
 
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 #include "logging/rtc_event_log/rtc_stream_config.h"
 
 namespace webrtc {
 
+struct LoggedAudioRecvConfig {
+  LoggedAudioRecvConfig() = default;
+  LoggedAudioRecvConfig(Timestamp timestamp, const rtclog::StreamConfig config)
+      : timestamp(timestamp), config(config) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  rtclog::StreamConfig config;
+};
+
 class RtcEventAudioReceiveStreamConfig final : public RtcEvent {
  public:
   static constexpr Type kType = Type::AudioReceiveStreamConfig;
@@ -34,6 +50,19 @@ class RtcEventAudioReceiveStreamConfig final : public RtcEvent {
 
   const rtclog::StreamConfig& config() const { return *config_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedAudioRecvConfig>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__,
+                                         __LINE__);
+  }
+
  private:
   RtcEventAudioReceiveStreamConfig(
       const RtcEventAudioReceiveStreamConfig& other);
@@ -41,18 +70,6 @@ class RtcEventAudioReceiveStreamConfig final : public RtcEvent {
   const std::unique_ptr<rtclog::StreamConfig> config_;
 };
 
-struct LoggedAudioRecvConfig {
-  LoggedAudioRecvConfig() = default;
-  LoggedAudioRecvConfig(Timestamp timestamp, const rtclog::StreamConfig config)
-      : timestamp(timestamp), config(config) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  rtclog::StreamConfig config;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_AUDIO_RECEIVE_STREAM_CONFIG_H_
diff --git a/logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h b/logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h
index 4e93871ae8..8b699e71cf 100644
--- a/logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h
+++ b/logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h
@@ -12,12 +12,28 @@
 #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_AUDIO_SEND_STREAM_CONFIG_H_
 
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 #include "logging/rtc_event_log/rtc_stream_config.h"
 
 namespace webrtc {
 
+struct LoggedAudioSendConfig {
+  LoggedAudioSendConfig() = default;
+  LoggedAudioSendConfig(Timestamp timestamp, const rtclog::StreamConfig config)
+      : timestamp(timestamp), config(config) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  rtclog::StreamConfig config;
+};
+
 class RtcEventAudioSendStreamConfig final : public RtcEvent {
  public:
   static constexpr Type kType = Type::AudioSendStreamConfig;
@@ -33,23 +49,25 @@ class RtcEventAudioSendStreamConfig final : public RtcEvent {
 
   const rtclog::StreamConfig& config() const { return *config_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedAudioSendConfig>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__,
+                                         __LINE__);
+  }
+
  private:
   RtcEventAudioSendStreamConfig(const RtcEventAudioSendStreamConfig& other);
 
   const std::unique_ptr<rtclog::StreamConfig> config_;
 };
 
-struct LoggedAudioSendConfig {
-  LoggedAudioSendConfig() = default;
-  LoggedAudioSendConfig(Timestamp timestamp, const rtclog::StreamConfig config)
-      : timestamp(timestamp), config(config) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  rtclog::StreamConfig config;
-};
 
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_AUDIO_SEND_STREAM_CONFIG_H_
+ */ + +#include "logging/rtc_event_log/events/rtc_event_begin_log.h" + +#include "absl/strings/string_view.h" + +namespace webrtc { +constexpr RtcEvent::Type RtcEventBeginLog::kType; +constexpr EventParameters RtcEventBeginLog::event_params_; +constexpr FieldParameters RtcEventBeginLog::utc_start_time_params_; + +RtcEventBeginLog::RtcEventBeginLog(Timestamp timestamp, + Timestamp utc_start_time) + : RtcEvent(timestamp.us()), utc_start_time_ms_(utc_start_time.ms()) {} + +RtcEventBeginLog::RtcEventBeginLog(const RtcEventBeginLog& other) + : RtcEvent(other.timestamp_us_) {} + +RtcEventBeginLog::~RtcEventBeginLog() = default; + +std::string RtcEventBeginLog::Encode(rtc::ArrayView batch) { + EventEncoder encoder(event_params_, batch); + + encoder.EncodeField( + utc_start_time_params_, + ExtractRtcEventMember(batch, &RtcEventBeginLog::utc_start_time_ms_)); + + return encoder.AsString(); +} + +RtcEventLogParseStatus RtcEventBeginLog::Parse( + absl::string_view encoded_bytes, + bool batched, + std::vector& output) { + EventParser parser; + auto status = parser.Initialize(encoded_bytes, batched); + if (!status.ok()) + return status; + + rtc::ArrayView output_batch = + ExtendLoggedBatch(output, parser.NumEventsInBatch()); + + constexpr FieldParameters timestamp_params{ + "timestamp_ms", FieldParameters::kTimestampField, FieldType::kVarInt, 64}; + RtcEventLogParseStatusOr> result = + parser.ParseNumericField(timestamp_params); + if (!result.ok()) + return result.status(); + status = PopulateRtcEventTimestamp( + result.value(), &LoggedStartEvent::timestamp, output_batch); + if (!status.ok()) + return status; + + result = parser.ParseNumericField(utc_start_time_params_); + if (!result.ok()) + return result.status(); + status = PopulateRtcEventTimestamp( + result.value(), &LoggedStartEvent::utc_start_time, output_batch); + if (!status.ok()) + return status; + + return RtcEventLogParseStatus::Success(); +} + +} // namespace webrtc diff --git a/logging/rtc_event_log/events/rtc_event_begin_log.h b/logging/rtc_event_log/events/rtc_event_begin_log.h new file mode 100644 index 0000000000..8dc47b5a02 --- /dev/null +++ b/logging/rtc_event_log/events/rtc_event_begin_log.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
diff --git a/logging/rtc_event_log/events/rtc_event_begin_log.h b/logging/rtc_event_log/events/rtc_event_begin_log.h
new file mode 100644
index 0000000000..8dc47b5a02
--- /dev/null
+++ b/logging/rtc_event_log/events/rtc_event_begin_log.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_BEGIN_LOG_H_
+#define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_BEGIN_LOG_H_
+
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/rtc_event_log/rtc_event.h"
+#include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
+#include "logging/rtc_event_log/events/rtc_event_field_extraction.h"
+
+namespace webrtc {
+
+struct LoggedStartEvent {
+  LoggedStartEvent() = default;
+
+  explicit LoggedStartEvent(Timestamp timestamp)
+      : LoggedStartEvent(timestamp, timestamp) {}
+
+  LoggedStartEvent(Timestamp timestamp, Timestamp utc_start_time)
+      : timestamp(timestamp), utc_start_time(utc_start_time) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp utc_time() const { return utc_start_time; }
+
+  Timestamp timestamp = Timestamp::PlusInfinity();
+  Timestamp utc_start_time = Timestamp::PlusInfinity();
+};
+
+class RtcEventBeginLog final : public RtcEvent {
+ public:
+  static constexpr Type kType = Type::BeginV3Log;
+
+  RtcEventBeginLog(Timestamp timestamp, Timestamp utc_start_time);
+  ~RtcEventBeginLog() override;
+
+  Type GetType() const override { return kType; }
+  bool IsConfigEvent() const override { return false; }
+
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch);
+
+  static RtcEventLogParseStatus Parse(absl::string_view encoded_bytes,
+                                      bool batched,
+                                      std::vector<LoggedStartEvent>& output);
+
+ private:
+  RtcEventBeginLog(const RtcEventBeginLog& other);
+
+  int64_t utc_start_time_ms_;
+
+  static constexpr EventParameters event_params_{"BeginLog",
+                                                 RtcEventBeginLog::kType};
+  static constexpr FieldParameters utc_start_time_params_{
+      "utc_start_time_ms", /*id=*/1, FieldType::kVarInt, /*width=*/64};
+};
+
+}  // namespace webrtc
+#endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_BEGIN_LOG_H_
diff --git a/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.cc b/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.cc
index f3f12192c4..0e98b2ff11 100644
--- a/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.cc
+++ b/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.cc
@@ -15,6 +15,12 @@
 
 namespace webrtc {
 
+constexpr RtcEventDefinition<RtcEventBweUpdateDelayBased,
+                             LoggedBweDelayBasedUpdate,
+                             int32_t,
+                             BandwidthUsage>
+    RtcEventBweUpdateDelayBased::definition_;
+
 RtcEventBweUpdateDelayBased::RtcEventBweUpdateDelayBased(
     int32_t bitrate_bps,
     BandwidthUsage detector_state)
diff --git a/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h b/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h
index 522f98fd8d..4f145de412 100644
--- a/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h
+++ b/logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h
@@ -13,14 +13,75 @@
 
 #include <stdint.h>
 
+#include <limits>
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/network_state_predictor.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_definition.h"
 
 namespace webrtc {
 
+// Separate the event log encoding from the enum values.
+// As long as the enum values are the same as the encodings,
+// the two conversion functions can be compiled to (roughly)
+// a range check each.
+template <>
+class RtcEventLogEnum<BandwidthUsage> {
+  static constexpr uint64_t kBwNormal = 0;
+  static constexpr uint64_t kBwUnderusing = 1;
+  static constexpr uint64_t kBwOverusing = 2;
+
+ public:
+  static uint64_t Encode(BandwidthUsage x) {
+    switch (x) {
+      case BandwidthUsage::kBwNormal:
+        return kBwNormal;
+      case BandwidthUsage::kBwUnderusing:
+        return kBwUnderusing;
+      case BandwidthUsage::kBwOverusing:
+        return kBwOverusing;
+      case BandwidthUsage::kLast:
+        RTC_DCHECK_NOTREACHED();
+    }
+    RTC_DCHECK_NOTREACHED();
+    return std::numeric_limits<uint64_t>::max();
+  }
+  static RtcEventLogParseStatusOr<BandwidthUsage> Decode(uint64_t x) {
+    switch (x) {
+      case kBwNormal:
+        return BandwidthUsage::kBwNormal;
+      case kBwUnderusing:
+        return BandwidthUsage::kBwUnderusing;
+      case kBwOverusing:
+        return BandwidthUsage::kBwOverusing;
+    }
+    return RtcEventLogParseStatus::Error(
+        "Failed to decode BandwidthUsage enum", __FILE__, __LINE__);
+  }
+};
+
+struct LoggedBweDelayBasedUpdate {
+  LoggedBweDelayBasedUpdate() = default;
+  LoggedBweDelayBasedUpdate(Timestamp timestamp,
+                            int32_t bitrate_bps,
+                            BandwidthUsage detector_state)
+      : timestamp(timestamp),
+        bitrate_bps(bitrate_bps),
+        detector_state(detector_state) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  int32_t bitrate_bps;
+  BandwidthUsage detector_state;
+};
+
 class RtcEventBweUpdateDelayBased final : public RtcEvent {
  public:
   static constexpr Type kType = Type::BweUpdateDelayBased;
@@ -37,28 +98,36 @@ class RtcEventBweUpdateDelayBased final : public RtcEvent {
   int32_t bitrate_bps() const { return bitrate_bps_; }
   BandwidthUsage detector_state() const { return detector_state_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    return RtcEventBweUpdateDelayBased::definition_.EncodeBatch(batch);
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedBweDelayBasedUpdate>& output) {
+    return RtcEventBweUpdateDelayBased::definition_.ParseBatch(encoded_bytes,
+                                                               batched, output);
+  }
+
  private:
   RtcEventBweUpdateDelayBased(const RtcEventBweUpdateDelayBased& other);
 
   const int32_t bitrate_bps_;
   const BandwidthUsage detector_state_;
-};
 
-struct LoggedBweDelayBasedUpdate {
-  LoggedBweDelayBasedUpdate() = default;
-  LoggedBweDelayBasedUpdate(Timestamp timestamp,
-                            int32_t bitrate_bps,
-                            BandwidthUsage detector_state)
-      : timestamp(timestamp),
-        bitrate_bps(bitrate_bps),
-        detector_state(detector_state) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  int32_t bitrate_bps;
-  BandwidthUsage detector_state;
+  static constexpr RtcEventDefinition<RtcEventBweUpdateDelayBased,
+                                      LoggedBweDelayBasedUpdate,
+                                      int32_t,
+                                      BandwidthUsage>
+      definition_{
+          {"BweDelayBased", RtcEventBweUpdateDelayBased::kType},
+          {&RtcEventBweUpdateDelayBased::bitrate_bps_,
+           &LoggedBweDelayBasedUpdate::bitrate_bps,
+           {"bitrate_bps", /*id=*/1, FieldType::kVarInt, /*width=*/32}},
+          {&RtcEventBweUpdateDelayBased::detector_state_,
+           &LoggedBweDelayBasedUpdate::detector_state,
+           {"detector_state", /*id=*/2, FieldType::kVarInt, /*width=*/64}}};
 };
 
 }  // namespace webrtc
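Decoupling the wire constants from the C++ enum means a future renumbering of BandwidthUsage cannot silently corrupt old logs, and an unknown wire value surfaces as a parse error instead of an out-of-range enum. Sketch (illustrative):

    uint64_t wire =
        RtcEventLogEnum<BandwidthUsage>::Encode(BandwidthUsage::kBwOverusing);
    auto ok = RtcEventLogEnum<BandwidthUsage>::Decode(wire);   // kBwOverusing
    auto bad = RtcEventLogEnum<BandwidthUsage>::Decode(1234);  // !bad.ok()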
diff --git a/logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h b/logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h
index b031658ea2..1caf818a0c 100644
--- a/logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h
+++ b/logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h
@@ -14,12 +14,36 @@
 #include <stdint.h>
 
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
+struct LoggedBweLossBasedUpdate {
+  LoggedBweLossBasedUpdate() = default;
+  LoggedBweLossBasedUpdate(Timestamp timestamp,
+                           int32_t bitrate_bps,
+                           uint8_t fraction_lost,
+                           int32_t expected_packets)
+      : timestamp(timestamp),
+        bitrate_bps(bitrate_bps),
+        fraction_lost(fraction_lost),
+        expected_packets(expected_packets) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  int32_t bitrate_bps;
+  uint8_t fraction_lost;
+  int32_t expected_packets;
+};
+
 class RtcEventBweUpdateLossBased final : public RtcEvent {
  public:
   static constexpr Type kType = Type::BweUpdateLossBased;
@@ -38,6 +62,19 @@ class RtcEventBweUpdateLossBased final : public RtcEvent {
   uint8_t fraction_loss() const { return fraction_loss_; }
   int32_t total_packets() const { return total_packets_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedBweLossBasedUpdate>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__,
+                                         __LINE__);
+  }
+
  private:
   RtcEventBweUpdateLossBased(const RtcEventBweUpdateLossBased& other);
 
@@ -46,26 +83,6 @@ class RtcEventBweUpdateLossBased final : public RtcEvent {
   const int32_t total_packets_;
 };
 
-struct LoggedBweLossBasedUpdate {
-  LoggedBweLossBasedUpdate() = default;
-  LoggedBweLossBasedUpdate(Timestamp timestamp,
-                           int32_t bitrate_bps,
-                           uint8_t fraction_lost,
-                           int32_t expected_packets)
-      : timestamp(timestamp),
-        bitrate_bps(bitrate_bps),
-        fraction_lost(fraction_lost),
-        expected_packets(expected_packets) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  int32_t bitrate_bps;
-  uint8_t fraction_lost;
-  int32_t expected_packets;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_BWE_UPDATE_LOSS_BASED_H_
diff --git a/logging/rtc_event_log/events/rtc_event_definition.h b/logging/rtc_event_log/events/rtc_event_definition.h
new file mode 100644
index 0000000000..8688c5fc7b
--- /dev/null
+++ b/logging/rtc_event_log/events/rtc_event_definition.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_DEFINITION_H_
+#define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_DEFINITION_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
+#include "logging/rtc_event_log/events/rtc_event_field_extraction.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+template <typename EventType, typename LoggedType, typename T>
+struct RtcEventFieldDefinition {
+  const T EventType::*event_member;
+  T LoggedType::*logged_member;
+  FieldParameters params;
+};
+
+// Base case
+template <typename EventType, typename LoggedType, typename... Ts>
+class RtcEventDefinitionImpl {
+ public:
+  void EncodeImpl(EventEncoder&, rtc::ArrayView<const RtcEvent*>) const {}
+  RtcEventLogParseStatus ParseImpl(EventParser&,
+                                   rtc::ArrayView<LoggedType>) const {
+    return RtcEventLogParseStatus::Success();
+  }
+};
+
+// Recursive case
+template <typename EventType, typename LoggedType, typename T, typename... Ts>
+class RtcEventDefinitionImpl<EventType, LoggedType, T, Ts...> {
+ public:
+  constexpr RtcEventDefinitionImpl(
+      RtcEventFieldDefinition<EventType, LoggedType, T> field,
+      RtcEventFieldDefinition<EventType, LoggedType, Ts>... rest)
+      : field_(field), rest_(rest...) {}
+
+  void EncodeImpl(EventEncoder& encoder,
+                  rtc::ArrayView<const RtcEvent*> batch) const {
+    auto values = ExtractRtcEventMember(batch, field_.event_member);
+    encoder.EncodeField(field_.params, values);
+    rest_.EncodeImpl(encoder, batch);
+  }
+
+  RtcEventLogParseStatus ParseImpl(
+      EventParser& parser,
+      rtc::ArrayView<LoggedType> output_batch) const {
+    RtcEventLogParseStatusOr<rtc::ArrayView<uint64_t>> result =
+        parser.ParseNumericField(field_.params);
+    if (!result.ok())
+      return result.status();
+    auto status = PopulateRtcEventMember(result.value(), field_.logged_member,
+                                         output_batch);
+    if (!status.ok())
+      return status;
+
+    return rest_.ParseImpl(parser, output_batch);
+  }
+
+ private:
+  RtcEventFieldDefinition<EventType, LoggedType, T> field_;
+  RtcEventDefinitionImpl<EventType, LoggedType, Ts...> rest_;
+};
+// The RtcEventDefinition sets up a mapping between the fields
+// in an RtcEvent and the corresponding fields in the parsed struct.
+// For example, an RtcFoo class containing two fields, `uint32_t bar`
+// and `bool baz` (a log timestamp is always implicitly added),
+// might have a definition
+// RtcEventDefinition<RtcFoo, LoggedFoo, uint32_t, bool>(
+//   {"foo", RtcFoo::Type},
+//   {&RtcFoo::bar_, &LoggedFoo::bar, {"bar", 1, FieldType::kVarInt, 32}},
+//   {&RtcFoo::baz_, &LoggedFoo::baz, {"baz", 2, FieldType::kFixed8, 1}},
+// );
+// In addition to defining string names to aid debugging,
+// this specifies that
+// * RtcFoo::Type uniquely identifies an RtcFoo in the encoded stream
+// * The `bar` field has ID 1, is encoded as a VarInt
+//   (when not delta compressed), and wraps around after 32 bits.
+// * The `baz` field has ID 2, is encoded as an 8-bit field
+//   (when not delta compressed), and wraps around after 1 bit.
+// Note that the numerical field and event IDs can't be changed since
+// that would break compatibility with old logs.
+// In most cases (including all cases where wrap-around isn't
+// expected), the wrap-around should be equal to the bit width of
+// the field.
+template <typename EventType, typename LoggedType, typename... Ts>
+class RtcEventDefinition {
+ public:
+  constexpr RtcEventDefinition(
+      EventParameters params,
+      RtcEventFieldDefinition<EventType, LoggedType, Ts>... fields)
+      : params_(params), fields_(fields...) {}
+
+  std::string EncodeBatch(rtc::ArrayView<const RtcEvent*> batch) const {
+    EventEncoder encoder(params_, batch);
+    fields_.EncodeImpl(encoder, batch);
+    return encoder.AsString();
+  }
+
+  RtcEventLogParseStatus ParseBatch(absl::string_view s,
+                                    bool batched,
+                                    std::vector<LoggedType>& output) const {
+    EventParser parser;
+    auto status = parser.Initialize(s, batched);
+    if (!status.ok())
+      return status;
+
+    rtc::ArrayView<LoggedType> output_batch =
+        ExtendLoggedBatch(output, parser.NumEventsInBatch());
+
+    constexpr FieldParameters timestamp_params{
+        "timestamp_ms", FieldParameters::kTimestampField, FieldType::kVarInt,
+        64};
+    RtcEventLogParseStatusOr<rtc::ArrayView<uint64_t>> result =
+        parser.ParseNumericField(timestamp_params);
+    if (!result.ok())
+      return result.status();
+    status = PopulateRtcEventTimestamp(result.value(), &LoggedType::timestamp,
+                                       output_batch);
+    if (!status.ok())
+      return status;
+
+    return fields_.ParseImpl(parser, output_batch);
+  }
+
+ private:
+  EventParameters params_;
+  RtcEventDefinitionImpl<EventType, LoggedType, Ts...> fields_;
+};
+
+}  // namespace webrtc
+
+#endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_DEFINITION_H_
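The variadic recursion unrolls at compile time into one EncodeField/ParseNumericField call per declared field, in declaration order. For the two-field RtcFoo from the comment above, EncodeBatch() is effectively (sketch; RtcFoo/LoggedFoo are the hypothetical names from that comment):

    EventEncoder encoder({"foo", RtcFoo::Type}, batch);
    encoder.EncodeField(bar_params, ExtractRtcEventMember(batch, &RtcFoo::bar_));
    encoder.EncodeField(baz_params, ExtractRtcEventMember(batch, &RtcFoo::baz_));
    return encoder.AsString();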
#include "api/units/timestamp.h" +#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h" namespace webrtc { +struct LoggedDtlsWritableState { + LoggedDtlsWritableState() = default; + explicit LoggedDtlsWritableState(bool writable) : writable(writable) {} + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + bool writable; +}; + class RtcEventDtlsWritableState : public RtcEvent { public: static constexpr Type kType = Type::DtlsWritableState; @@ -32,23 +47,25 @@ class RtcEventDtlsWritableState : public RtcEvent { bool writable() const { return writable_; } + static std::string Encode(rtc::ArrayView batch) { + // TODO(terelius): Implement + return ""; + } + + static RtcEventLogParseStatus Parse( + absl::string_view encoded_bytes, + bool batched, + std::vector& output) { + // TODO(terelius): Implement + return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__); + } + private: RtcEventDtlsWritableState(const RtcEventDtlsWritableState& other); const bool writable_; }; -struct LoggedDtlsWritableState { - LoggedDtlsWritableState() = default; - explicit LoggedDtlsWritableState(bool writable) : writable(writable) {} - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp timestamp = Timestamp::MinusInfinity(); - bool writable; -}; - } // namespace webrtc #endif // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_DTLS_WRITABLE_STATE_H_ diff --git a/logging/rtc_event_log/events/rtc_event_end_log.cc b/logging/rtc_event_log/events/rtc_event_end_log.cc new file mode 100644 index 0000000000..52abf9e842 --- /dev/null +++ b/logging/rtc_event_log/events/rtc_event_end_log.cc @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "logging/rtc_event_log/events/rtc_event_end_log.h" + +#include "absl/strings/string_view.h" + +namespace webrtc { +constexpr RtcEvent::Type RtcEventEndLog::kType; +constexpr EventParameters RtcEventEndLog::event_params_; + +RtcEventEndLog::RtcEventEndLog(Timestamp timestamp) + : RtcEvent(timestamp.us()) {} + +RtcEventEndLog::RtcEventEndLog(const RtcEventEndLog& other) + : RtcEvent(other.timestamp_us_) {} + +RtcEventEndLog::~RtcEventEndLog() = default; + +std::string RtcEventEndLog::Encode(rtc::ArrayView batch) { + EventEncoder encoder(event_params_, batch); + return encoder.AsString(); +} + +RtcEventLogParseStatus RtcEventEndLog::Parse( + absl::string_view encoded_bytes, + bool batched, + std::vector& output) { + EventParser parser; + auto status = parser.Initialize(encoded_bytes, batched); + if (!status.ok()) + return status; + + rtc::ArrayView output_batch = + ExtendLoggedBatch(output, parser.NumEventsInBatch()); + + constexpr FieldParameters timestamp_params{ + "timestamp_ms", FieldParameters::kTimestampField, FieldType::kVarInt, 64}; + RtcEventLogParseStatusOr> result = + parser.ParseNumericField(timestamp_params); + if (!result.ok()) + return result.status(); + status = PopulateRtcEventTimestamp(result.value(), + &LoggedStopEvent::timestamp, output_batch); + if (!status.ok()) + return status; + + return RtcEventLogParseStatus::Success(); +} + +} // namespace webrtc diff --git a/logging/rtc_event_log/events/rtc_event_end_log.h b/logging/rtc_event_log/events/rtc_event_end_log.h new file mode 100644 index 0000000000..ed7770f339 --- /dev/null +++ b/logging/rtc_event_log/events/rtc_event_end_log.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
diff --git a/logging/rtc_event_log/events/rtc_event_end_log.h b/logging/rtc_event_log/events/rtc_event_end_log.h
new file mode 100644
index 0000000000..ed7770f339
--- /dev/null
+++ b/logging/rtc_event_log/events/rtc_event_end_log.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_END_LOG_H_
+#define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_END_LOG_H_
+
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "api/rtc_event_log/rtc_event.h"
+#include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
+#include "logging/rtc_event_log/events/rtc_event_field_extraction.h"
+
+namespace webrtc {
+
+struct LoggedStopEvent {
+  LoggedStopEvent() = default;
+
+  explicit LoggedStopEvent(Timestamp timestamp) : timestamp(timestamp) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::PlusInfinity();
+};
+
+class RtcEventEndLog final : public RtcEvent {
+ public:
+  static constexpr Type kType = Type::EndV3Log;
+
+  explicit RtcEventEndLog(Timestamp timestamp);
+  ~RtcEventEndLog() override;
+
+  Type GetType() const override { return kType; }
+  bool IsConfigEvent() const override { return false; }
+
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch);
+
+  static RtcEventLogParseStatus Parse(absl::string_view encoded_bytes,
+                                      bool batched,
+                                      std::vector<LoggedStopEvent>& output);
+
+ private:
+  RtcEventEndLog(const RtcEventEndLog& other);
+
+  static constexpr EventParameters event_params_{"EndLog",
+                                                 RtcEventEndLog::kType};
+};
+
+}  // namespace webrtc
+#endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_END_LOG_H_
diff --git a/logging/rtc_event_log/events/rtc_event_field_encoding.cc b/logging/rtc_event_log/events/rtc_event_field_encoding.cc
new file mode 100644
index 0000000000..68188ce856
--- /dev/null
+++ b/logging/rtc_event_log/events/rtc_event_field_encoding.cc
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "logging/rtc_event_log/events/rtc_event_field_encoding.h"
+
+#include <cstring>
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "logging/rtc_event_log/encoder/bit_writer.h"
+#include "logging/rtc_event_log/encoder/var_int.h"
+#include "logging/rtc_event_log/events/rtc_event_field_extraction.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+using webrtc_event_logging::UnsignedDelta;
+
+namespace {
+
+std::string SerializeLittleEndian(uint64_t value, uint8_t bytes) {
+  RTC_DCHECK_LE(bytes, sizeof(uint64_t));
+  RTC_DCHECK_GE(bytes, 1);
+  if (bytes < sizeof(uint64_t)) {
+    // Note that shifting a 64-bit value by 64 (or more) bits is undefined.
+    RTC_DCHECK_EQ(value >> (8 * bytes), 0);
+  }
+  std::string output(bytes, 0);
+  // Getting a non-const pointer to the representation. See e.g.
+  // https://en.cppreference.com/w/cpp/string/basic_string:
+  // "The elements of a basic_string are stored contiguously,
+  // that is, [...] a pointer to s[0] can be passed to functions
+  // that expect a pointer to the first element of a null-terminated
+  // CharT[] array."
+  uint8_t* p = reinterpret_cast<uint8_t*>(&output[0]);
+#ifdef WEBRTC_ARCH_LITTLE_ENDIAN
+  memcpy(p, &value, bytes);
+#else
+  while (bytes > 0) {
+    *p = static_cast<uint8_t>(value & 0xFF);
+    value >>= 8;
+    ++p;
+    --bytes;
+  }
+#endif  // WEBRTC_ARCH_LITTLE_ENDIAN
+  return output;
+}
+
+}  // namespace
+
+namespace webrtc {
+
+std::string EncodeOptionalValuePositions(std::vector<bool> positions) {
+  BitWriter writer((positions.size() + 7) / 8);
+  for (bool position : positions) {
+    writer.WriteBits(position ? 1u : 0u, 1);
+  }
+  return writer.GetString();
+}
+
+std::string EncodeSingleValue(uint64_t value, FieldType field_type) {
+  switch (field_type) {
+    case FieldType::kFixed8:
+      return SerializeLittleEndian(value, /*bytes=*/1);
+    case FieldType::kFixed32:
+      return SerializeLittleEndian(value, /*bytes=*/4);
+    case FieldType::kFixed64:
+      return SerializeLittleEndian(value, /*bytes=*/8);
+    case FieldType::kVarInt:
+      return EncodeVarInt(value);
+    case FieldType::kString:
+      RTC_DCHECK_NOTREACHED();
+      return std::string();
+  }
+  RTC_DCHECK_NOTREACHED();
+  return std::string();
+}
+
+absl::optional<FieldType> ConvertFieldType(uint64_t value) {
+  switch (value) {
+    case static_cast<uint64_t>(FieldType::kFixed8):
+      return FieldType::kFixed8;
+    case static_cast<uint64_t>(FieldType::kFixed32):
+      return FieldType::kFixed32;
+    case static_cast<uint64_t>(FieldType::kFixed64):
+      return FieldType::kFixed64;
+    case static_cast<uint64_t>(FieldType::kVarInt):
+      return FieldType::kVarInt;
+    case static_cast<uint64_t>(FieldType::kString):
+      return FieldType::kString;
+    default:
+      return absl::nullopt;
+  }
+}
+
+std::string EncodeDeltasV3(FixedLengthEncodingParametersV3 params,
+                           uint64_t base,
+                           rtc::ArrayView<const uint64_t> values) {
+  size_t output_bound = (values.size() * params.delta_bit_width() + 7) / 8;
+  BitWriter writer(output_bound);
+
+  uint64_t previous = base;
+  for (uint64_t value : values) {
+    if (params.signed_deltas()) {
+      uint64_t positive_delta =
+          UnsignedDelta(previous, value, params.value_mask());
+      uint64_t negative_delta =
+          UnsignedDelta(value, previous, params.value_mask());
+      uint64_t delta;
+      if (positive_delta <= negative_delta) {
+        delta = positive_delta;
+      } else {
+        // Compute the two's complement representation of the negative
+        // delta, in a field of width delta_bit_width().
+        RTC_DCHECK_GE(params.delta_mask(), negative_delta);
+        RTC_DCHECK_LT(params.delta_mask() - negative_delta,
+                      params.delta_mask());
+        delta = params.delta_mask() - negative_delta + 1;
+        RTC_DCHECK_LE(delta, params.delta_mask());
+      }
+      writer.WriteBits(delta, params.delta_bit_width());
+    } else {
+      uint64_t delta = UnsignedDelta(previous, value, params.value_mask());
+      writer.WriteBits(delta, params.delta_bit_width());
+    }
+    previous = value;
+  }
+
+  return writer.GetString();
+}
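A worked example of the signed-delta branch above (numbers chosen for illustration): with an 8-bit value mask (0xFF) and a 4-bit delta width (delta mask 0xF), stepping from 5 to 3 gives positive_delta = (3 - 5) mod 256 = 254 and negative_delta = 2. The negative delta is smaller, so the encoder writes 0xF - 2 + 1 = 14 = 0b1110, i.e. -2 in 4-bit two's complement, and the decoder's top-bit test later recovers the sign.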
+EventEncoder::EventEncoder(EventParameters params,
+                           rtc::ArrayView<const RtcEvent*> batch) {
+  batch_size_ = batch.size();
+  if (!batch.empty()) {
+    // Encode event type.
+    uint32_t batched = batch.size() > 1 ? 1 : 0;
+    event_tag_ = (static_cast<uint32_t>(params.id) << 1) + batched;
+
+    // Event tag and number of encoded bytes will be filled in when the
+    // encoding is finalized in AsString().
+
+    // Encode number of events in batch.
+    if (batched) {
+      encoded_fields_.push_back(EncodeVarInt(batch.size()));
+    }
+
+    // Encode timestamp.
+    std::vector<uint64_t> timestamps;
+    timestamps.reserve(batch.size());
+    for (const RtcEvent* event : batch) {
+      timestamps.push_back(EncodeAsUnsigned(event->timestamp_ms()));
+    }
+    constexpr FieldParameters timestamp_params{
+        "timestamp_ms", FieldParameters::kTimestampField, FieldType::kVarInt,
+        64};
+    EncodeField(timestamp_params, timestamps);
+  }
+}
+
+void EventEncoder::EncodeField(const FieldParameters& params,
+                               const ValuesWithPositions& values) {
+  return EncodeField(params, values.values, &values.position_mask);
+}
+
+void EventEncoder::EncodeField(const FieldParameters& params,
+                               const std::vector<uint64_t>& values,
+                               const std::vector<bool>* positions) {
+  if (positions) {
+    RTC_DCHECK_EQ(positions->size(), batch_size_);
+    RTC_DCHECK_LE(values.size(), batch_size_);
+  } else {
+    RTC_DCHECK_EQ(values.size(), batch_size_);
+  }
+
+  if (values.size() == 0) {
+    // If all values for a particular field are empty/nullopt, we skip the
+    // field entirely, even if the batch itself is non-empty.
+    return;
+  }
+
+  // We know that each event starts with the varint encoded timestamp,
+  // so we omit that field tag (field id + field type). In all other
+  // cases, we write the field tag.
+  if (params.field_id != FieldParameters::kTimestampField) {
+    RTC_DCHECK_LE(params.field_id, std::numeric_limits<uint64_t>::max() >> 3);
+    uint64_t field_tag = params.field_id << 3;
+    field_tag += static_cast<uint64_t>(params.field_type);
+    encoded_fields_.push_back(EncodeVarInt(field_tag));
+  }
+
+  RTC_CHECK_GE(values.size(), 1);
+  if (batch_size_ == 1) {
+    encoded_fields_.push_back(EncodeSingleValue(values[0], params.field_type));
+    return;
+  }
+
+  const bool values_optional = values.size() != batch_size_;
+
+  // Compute delta parameters.
+  rtc::ArrayView<const uint64_t> all_values(values);
+  uint64_t base = values[0];
+  rtc::ArrayView<const uint64_t> remaining_values(all_values.subview(1));
+
+  FixedLengthEncodingParametersV3 delta_params =
+      FixedLengthEncodingParametersV3::CalculateParameters(
+          base, remaining_values, params.value_width, values_optional);
+
+  encoded_fields_.push_back(EncodeVarInt(delta_params.DeltaHeaderAsInt()));
+
+  if (values_optional) {
+    RTC_CHECK(positions);
+    encoded_fields_.push_back(EncodeOptionalValuePositions(*positions));
+  }
+  // Base element, encoded as uint8, uint32, uint64 or varint.
+  encoded_fields_.push_back(EncodeSingleValue(base, params.field_type));
+
+  // If all (existing) values are equal to the base, then we can skip
+  // writing the all-zero deltas, and instead infer those from the delta
+  // header.
+  if (!delta_params.values_equal()) {
+    encoded_fields_.push_back(
+        EncodeDeltasV3(delta_params, base, remaining_values));
+  }
+}
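The field tag written above packs the field ID and the base encoding into a single varint: tag = (field_id << 3) | field_type. For example (illustrative), field ID 2 with FieldType::kVarInt (value 3) encodes as (2 << 3) + 3 = 19, a single varint byte 0x13; the three low bits exactly cover the FieldType range 0-7 reserved in the header below.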
+void EventEncoder::EncodeField(const FieldParameters& params,
+                               const std::vector<absl::string_view>& values) {
+  RTC_DCHECK_EQ(values.size(), batch_size_);
+
+  if (values.size() == 0) {
+    // If all values for a particular field are empty/nullopt, we skip the
+    // field entirely, even if the batch itself is non-empty.
+    return;
+  }
+
+  // Write the field tag.
+  RTC_CHECK_NE(params.field_id, FieldParameters::kTimestampField);
+  RTC_DCHECK_LE(params.field_id, std::numeric_limits<uint64_t>::max() >> 3);
+  RTC_DCHECK_EQ(params.field_type, FieldType::kString);
+  uint64_t field_tag = params.field_id << 3;
+  field_tag += static_cast<uint64_t>(params.field_type);
+  encoded_fields_.push_back(EncodeVarInt(field_tag));
+
+  if (values.size() > 1) {
+    // If there are multiple values in the batch, write the encoding
+    // parameters. (Values >0 are reserved for future use.)
+    uint64_t encoding_params = 0;
+    encoded_fields_.push_back(EncodeVarInt(encoding_params));
+  }
+
+  // Write the strings as (length, data) pairs.
+  for (absl::string_view s : values) {
+    encoded_fields_.push_back(EncodeVarInt(s.size()));
+    encoded_fields_.push_back(std::string(s));
+  }
+}
+
+std::string EventEncoder::AsString() {
+  std::string encoded_event;
+
+  if (batch_size_ == 0) {
+    RTC_DCHECK_EQ(encoded_fields_.size(), 0);
+    return encoded_event;
+  }
+
+  // Compute size of encoded fields.
+  size_t total_fields_size = 0;
+  for (const std::string& s : encoded_fields_) {
+    total_fields_size += s.size();
+  }
+
+  constexpr size_t kExpectedMaxEventTagBytes = 4;
+  constexpr size_t kExpectedMaxSizeEncodingBytes = 4;
+  encoded_event.reserve(kExpectedMaxEventTagBytes +
+                        kExpectedMaxSizeEncodingBytes + total_fields_size);
+
+  // Encode event tag (event id and whether batch or single event).
+  encoded_event.append(EncodeVarInt(event_tag_));
+
+  // Encode size of the remaining fields.
+  encoded_event.append(EncodeVarInt(total_fields_size));
+
+  // Append encoded fields.
+  for (const std::string& s : encoded_fields_) {
+    encoded_event.append(s);
+  }
+
+  return encoded_event;
+}
+
+}  // namespace webrtc
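Together with the constructor above, AsString() gives every encoded event the same outer layout (schematic):

    [event tag: (id << 1) | batched] [payload size] [event count, if batched]
    [timestamp field] [field 1] ... [field N]

The timestamp field is positional (it carries no tag); every later field starts with its tag, which is what allows a parser to skip fields it does not recognize.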
diff --git a/logging/rtc_event_log/events/rtc_event_field_encoding.h b/logging/rtc_event_log/events/rtc_event_field_encoding.h
new file mode 100644
index 0000000000..33b77b80f5
--- /dev/null
+++ b/logging/rtc_event_log/events/rtc_event_field_encoding.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_FIELD_ENCODING_H_
+#define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_FIELD_ENCODING_H_
+
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/rtc_event_log/rtc_event.h"
+#include "logging/rtc_event_log/encoder/rtc_event_log_encoder_common.h"
+#include "logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.h"
+#include "logging/rtc_event_log/events/rtc_event_field_extraction.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+// To maintain backwards compatibility with past (or future) logs,
+// the constants in this enum must not be changed.
+// New field types with numerical IDs 5-7 can be added, but old
+// parsers will fail to parse events containing the new fields.
+enum class FieldType : uint8_t {
+  kFixed8 = 0,
+  kFixed32 = 1,
+  kFixed64 = 2,
+  kVarInt = 3,
+  kString = 4,
+};
+
+// EventParameters map an event name to a numerical ID.
+struct EventParameters {
+  // The name is primarily used for debugging purposes.
+  const char* const name;
+  // The ID identifies the event type in the encoded log and,
+  // like the field IDs below, must never be changed.
+  const RtcEvent::Type id;
+};
+
+// FieldParameters define the encoding for a field.
+struct FieldParameters {
+  // The name is primarily used for debugging purposes.
+  const char* const name;
+  // Numerical ID for the field. Must be strictly greater than 0,
+  // and unique within each event type.
+  const uint64_t field_id;
+  // Encoding type for the base (i.e. non-delta) field in a batch.
+  const FieldType field_type;
+  // Number of bits after which wrap-around occurs. In most cases,
+  // this should be the number of bits in the field data type, i.e.
+  // 8 for a uint8_t, 32 for an int32_t and so on. However, `value_width`
+  // can be used to achieve a more efficient encoding if it is known
+  // that the field uses a smaller number of bits. For example, a
+  // 15-bit counter could set `value_width` to 15 even if the data is
+  // actually stored in a uint32_t.
+  const uint64_t value_width;
+  // Field ID 0 is reserved for timestamps.
+  static constexpr uint64_t kTimestampField = 0;
+};
+
+// The EventEncoder is used to encode a batch of events.
+class EventEncoder {
+ public:
+  EventEncoder(EventParameters params, rtc::ArrayView<const RtcEvent*> batch);
+
+  void EncodeField(const FieldParameters& params,
+                   const std::vector<uint64_t>& values,
+                   const std::vector<bool>* positions = nullptr);
+
+  void EncodeField(const FieldParameters& params,
+                   const ValuesWithPositions& values);
+
+  void EncodeField(const FieldParameters& params,
+                   const std::vector<absl::string_view>& values);
+
+  std::string AsString();
+
+ private:
+  size_t batch_size_;
+  uint32_t event_tag_;
+  std::vector<std::string> encoded_fields_;
+};
+
+std::string EncodeSingleValue(uint64_t value, FieldType field_type);
+std::string EncodeDeltasV3(FixedLengthEncodingParametersV3 params,
+                           uint64_t base,
+                           rtc::ArrayView<const uint64_t> values);
+
+// Given a batch of RtcEvents and a member pointer, extract that
+// member from each event in the batch. Signed integer members are
+// encoded as unsigned, and the bit size is increased so the result can
+// be represented as a std::vector<uint64_t>.
+// This is intended to be used in conjunction with
+// EventEncoder::EncodeField to encode a batch of events as follows:
+// auto values = ExtractRtcEventMember(batch, &RtcEventFoo::timestamp_ms);
+// encoder.EncodeField(timestamp_params, values)
+template <typename E,
+          typename T,
+          std::enable_if_t<std::is_integral<T>::value, bool> = true>
+std::vector<uint64_t> ExtractRtcEventMember(
+    rtc::ArrayView<const RtcEvent*> batch,
+    const T E::*member) {
+  std::vector<uint64_t> values;
+  values.reserve(batch.size());
+  for (const RtcEvent* event : batch) {
+    RTC_CHECK_EQ(event->GetType(), E::kType);
+    T value = static_cast<const E*>(event)->*member;
+    values.push_back(EncodeAsUnsigned(value));
+  }
+  return values;
+}
+
+// Extract an optional field from a batch of RtcEvents.
+// The function returns a vector of positions in addition to the vector of
+// values. The vector `positions` has the same length as the batch, where
+// `positions[i] == true` iff `batch[i]->member` has a value.
+// The values vector only contains the values that exist, so it
+// may be shorter than the batch.
+template <typename E,
+          typename T,
+          std::enable_if_t<std::is_integral<T>::value, bool> = true>
+ValuesWithPositions ExtractRtcEventMember(
+    rtc::ArrayView<const RtcEvent*> batch,
+    const absl::optional<T> E::*member) {
+  ValuesWithPositions result;
+  result.position_mask.reserve(batch.size());
+  result.values.reserve(batch.size());
+  for (const RtcEvent* event : batch) {
+    RTC_CHECK_EQ(event->GetType(), E::kType);
+    absl::optional<T> field = static_cast<const E*>(event)->*member;
+    result.position_mask.push_back(field.has_value());
+    if (field.has_value()) {
+      result.values.push_back(EncodeAsUnsigned(field.value()));
+    }
+  }
+  return result;
+}
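For optional members, the extractor above returns the values together with a presence bitmap, and the EncodeField overload taking ValuesWithPositions forwards both. Intended call pattern (sketch; RtcEventFoo and its absl::optional member are hypothetical):

    ValuesWithPositions vals =
        ExtractRtcEventMember(batch, &RtcEventFoo::optional_bitrate_);
    encoder.EncodeField(bitrate_params, vals);  // bitmap, then base + deltas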
+// Extract an enum field from a batch of RtcEvents.
+// Requires specializing RtcEventLogEnum<T> for the enum type T.
+template <typename E,
+          typename T,
+          std::enable_if_t<std::is_enum<T>::value, bool> = true>
+std::vector<uint64_t> ExtractRtcEventMember(
+    rtc::ArrayView<const RtcEvent*> batch,
+    const T E::*member) {
+  std::vector<uint64_t> values;
+  values.reserve(batch.size());
+  for (const RtcEvent* event : batch) {
+    RTC_CHECK_EQ(event->GetType(), E::kType);
+    T value = static_cast<const E*>(event)->*member;
+    values.push_back(RtcEventLogEnum<T>::Encode(value));
+  }
+  return values;
+}
+
+// Extract a string field from a batch of RtcEvents.
+template <typename E>
+std::vector<absl::string_view> ExtractRtcEventMember(
+    rtc::ArrayView<const RtcEvent*> batch,
+    const std::string E::*member) {
+  std::vector<absl::string_view> values;
+  values.reserve(batch.size());
+  for (const RtcEvent* event : batch) {
+    RTC_CHECK_EQ(event->GetType(), E::kType);
+    absl::string_view str = static_cast<const E*>(event)->*member;
+    values.push_back(str);
+  }
+  return values;
+}
+
+}  // namespace webrtc
+#endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_FIELD_ENCODING_H_
diff --git a/logging/rtc_event_log/events/rtc_event_field_encoding_parser.cc b/logging/rtc_event_log/events/rtc_event_field_encoding_parser.cc
new file mode 100644
index 0000000000..a9b0c08307
--- /dev/null
+++ b/logging/rtc_event_log/events/rtc_event_field_encoding_parser.cc
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
+
+#include <algorithm>
+#include <tuple>
+
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+#include "logging/rtc_event_log/encoder/var_int.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding.h"
+#include "rtc_base/bitstream_reader.h"
+#include "rtc_base/checks.h"
+
+namespace {
+absl::optional<webrtc::FieldType> ConvertFieldType(uint64_t value) {
+  switch (value) {
+    case static_cast<uint64_t>(webrtc::FieldType::kFixed8):
+      return webrtc::FieldType::kFixed8;
+    case static_cast<uint64_t>(webrtc::FieldType::kFixed32):
+      return webrtc::FieldType::kFixed32;
+    case static_cast<uint64_t>(webrtc::FieldType::kFixed64):
+      return webrtc::FieldType::kFixed64;
+    case static_cast<uint64_t>(webrtc::FieldType::kVarInt):
+      return webrtc::FieldType::kVarInt;
+    case static_cast<uint64_t>(webrtc::FieldType::kString):
+      return webrtc::FieldType::kString;
+    default:
+      return absl::nullopt;
+  }
+}
+}  // namespace
+
+namespace webrtc {
+
+uint64_t EventParser::ReadLittleEndian(uint8_t bytes) {
+  RTC_DCHECK_LE(bytes, sizeof(uint64_t));
+  RTC_DCHECK_GE(bytes, 1);
+
+  uint64_t value = 0;
+
+  if (bytes > pending_data_.length()) {
+    SetError();
+    return value;
+  }
+
+  const uint8_t* p = reinterpret_cast<const uint8_t*>(pending_data_.data());
+  unsigned int shift = 0;
+  uint8_t remaining = bytes;
+  while (remaining > 0) {
+    value += (static_cast<uint64_t>(*p) << shift);
+    shift += 8;
+    ++p;
+    --remaining;
+  }
+
+  pending_data_ = pending_data_.substr(bytes);
+  return value;
+}
+
+uint64_t EventParser::ReadVarInt() {
+  uint64_t output = 0;
+  bool success;
+  std::tie(success, pending_data_) = DecodeVarInt(pending_data_, &output);
+  if (!success) {
+    SetError();
+  }
+  return output;
+}
} + + BitstreamReader reader(pending_data_); + for (size_t i = 0; i < bits_to_read; i++) { + positions_.push_back(reader.ReadBit()); + } + if (!reader.Ok()) { + SetError(); + return 0; + } + + size_t num_existing_values = + std::count(positions_.begin(), positions_.end(), 1); + pending_data_ = pending_data_.substr((bits_to_read + 7) / 8); + return num_existing_values; +} + +uint64_t EventParser::ReadSingleValue(FieldType field_type) { + switch (field_type) { + case FieldType::kFixed8: + return ReadLittleEndian(/*bytes=*/1); + case FieldType::kFixed32: + return ReadLittleEndian(/*bytes=*/4); + case FieldType::kFixed64: + return ReadLittleEndian(/*bytes=*/8); + case FieldType::kVarInt: + return ReadVarInt(); + case FieldType::kString: + RTC_DCHECK_NOTREACHED(); + SetError(); + return 0; + } + RTC_DCHECK_NOTREACHED(); + SetError(); + return 0; +} + +void EventParser::ReadDeltasAndPopulateValues( + FixedLengthEncodingParametersV3 params, + uint64_t num_deltas, + uint64_t base) { + RTC_DCHECK(values_.empty()); + values_.reserve(num_deltas + 1); + values_.push_back(base); + + if (pending_data_.size() * 8 < num_deltas * params.delta_bit_width()) { + SetError(); + return; + } + + BitstreamReader reader(pending_data_); + const uint64_t top_bit = static_cast(1) + << (params.delta_bit_width() - 1); + + uint64_t value = base; + for (uint64_t i = 0; i < num_deltas; ++i) { + uint64_t delta = reader.ReadBits(params.delta_bit_width()); + RTC_DCHECK_LE(value, webrtc_event_logging::MaxUnsignedValueOfBitWidth( + params.value_bit_width())); + RTC_DCHECK_LE(delta, webrtc_event_logging::MaxUnsignedValueOfBitWidth( + params.delta_bit_width())); + bool negative_delta = params.signed_deltas() && ((delta & top_bit) != 0); + if (negative_delta) { + uint64_t delta_abs = (~delta & params.delta_mask()) + 1; + value = (value - delta_abs) & params.value_mask(); + } else { + value = (value + delta) & params.value_mask(); + } + values_.push_back(value); + } + + if (!reader.Ok()) { + SetError(); + return; + } + + pending_data_ = + pending_data_.substr((num_deltas * params.delta_bit_width() + 7) / 8); +} + +RtcEventLogParseStatus EventParser::Initialize(absl::string_view s, + bool batched) { + pending_data_ = s; + num_events_ = 1; + + if (batched) { + num_events_ = ReadVarInt(); + if (!Ok()) { + return RtcEventLogParseStatus::Error( + "Failed to read number of events in batch.", __FILE__, __LINE__); + } + } + return RtcEventLogParseStatus::Success(); +} + +RtcEventLogParseStatus EventParser::ParseNumericFieldInternal( + uint64_t value_bit_width, + FieldType field_type) { + RTC_DCHECK(values_.empty()); + RTC_DCHECK(positions_.empty()); + + if (num_events_ == 1) { + // Just a single value in the batch. + uint64_t base = ReadSingleValue(field_type); + if (!Ok()) { + return RtcEventLogParseStatus::Error("Failed to read value", __FILE__, + __LINE__); + } + positions_.push_back(true); + values_.push_back(base); + } else { + // Delta compressed batch. + // Read delta header. + uint64_t header_value = ReadVarInt(); + if (!Ok()) + return RtcEventLogParseStatus::Error("Failed to read delta header", + __FILE__, __LINE__); + // NB: value_bit_width may be incorrect for the field, if this isn't the + // field we are looking for. 
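+      // For orientation, a batched numeric field is laid out as read below
+      // (layout inferred from this parser rather than a separate spec):
+      //   tag | delta header | [presence bitmap] | base | packed deltas
+      // where the presence bitmap appears only if the delta header marks
+      // the field as optional.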
+ absl::optional delta_header = + FixedLengthEncodingParametersV3::ParseDeltaHeader(header_value, + value_bit_width); + if (!delta_header.has_value()) { + return RtcEventLogParseStatus::Error("Failed to parse delta header", + __FILE__, __LINE__); + } + + uint64_t num_existing_deltas = NumEventsInBatch() - 1; + if (delta_header->values_optional()) { + size_t num_nonempty_values = ReadOptionalValuePositions(); + if (!Ok()) { + return RtcEventLogParseStatus::Error( + "Failed to read positions of optional values", __FILE__, __LINE__); + } + if (num_nonempty_values < 1 || NumEventsInBatch() < num_nonempty_values) { + return RtcEventLogParseStatus::Error( + "Expected at least one non_empty value", __FILE__, __LINE__); + } + num_existing_deltas = num_nonempty_values - 1; + } else { + // All elements in the batch have values. + positions_.assign(NumEventsInBatch(), 1u); + } + + // Read base. + uint64_t base = ReadSingleValue(field_type); + if (!Ok()) { + return RtcEventLogParseStatus::Error("Failed to read value", __FILE__, + __LINE__); + } + + if (delta_header->values_equal()) { + // Duplicate the base value num_existing_deltas times. + values_.assign(num_existing_deltas + 1, base); + } else { + // Read deltas; ceil(num_existing_deltas*delta_width/8) bits + ReadDeltasAndPopulateValues(delta_header.value(), num_existing_deltas, + base); + if (!Ok()) { + return RtcEventLogParseStatus::Error("Failed to decode deltas", + __FILE__, __LINE__); + } + } + } + return RtcEventLogParseStatus::Success(); +} + +RtcEventLogParseStatus EventParser::ParseStringFieldInternal() { + RTC_DCHECK(strings_.empty()); + if (num_events_ > 1) { + // String encoding params reserved for future use. + uint64_t encoding_params = ReadVarInt(); + if (!Ok()) { + return RtcEventLogParseStatus::Error("Failed to read string encoding", + __FILE__, __LINE__); + } + if (encoding_params != 0) { + return RtcEventLogParseStatus::Error( + "Unrecognized string encoding parameters", __FILE__, __LINE__); + } + } + strings_.reserve(num_events_); + for (uint64_t i = 0; i < num_events_; ++i) { + // Just a single value in the batch. + uint64_t size = ReadVarInt(); + if (!Ok()) { + return RtcEventLogParseStatus::Error("Failed to read string size", + __FILE__, __LINE__); + } + if (size > pending_data_.size()) { + return RtcEventLogParseStatus::Error("String size exceeds remaining data", + __FILE__, __LINE__); + } + strings_.push_back(pending_data_.substr(0, size)); + pending_data_ = pending_data_.substr(size); + } + return RtcEventLogParseStatus::Success(); +} + +RtcEventLogParseStatus EventParser::ParseField(const FieldParameters& params) { + // Verify that the event parses fields in increasing order. + if (params.field_id == FieldParameters::kTimestampField) { + RTC_DCHECK_EQ(last_field_id_, FieldParameters::kTimestampField); + } else { + RTC_DCHECK_GT(params.field_id, last_field_id_); + } + last_field_id_ = params.field_id; + + // Initialization for positional fields that don't encode field ID and type. + uint64_t field_id = params.field_id; + FieldType field_type = params.field_type; + + // Fields are encoded in increasing field_id order. + // Skip unknown fields with field_id < params.field_id until we either + // find params.field_id or a field with higher id, in which case we know that + // params.field_id doesn't exist. + while (!pending_data_.empty()) { + absl::string_view field_start = pending_data_; + ClearTemporaries(); + + // Read tag for non-positional fields. 
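+    // The tag mirrors the split below:
+    //   field_tag = (field_id << 3) | static_cast<uint64_t>(field_type)
+    // so the low 3 bits carry the wire type and the remaining bits the
+    // field ID.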
+ if (params.field_id != FieldParameters::kTimestampField) { + uint64_t field_tag = ReadVarInt(); + if (!Ok()) + return RtcEventLogParseStatus::Error("Failed to read field tag", + __FILE__, __LINE__); + // Split tag into field ID and field type. + field_id = field_tag >> 3; + absl::optional conversion = ConvertFieldType(field_tag & 7u); + if (!conversion.has_value()) + return RtcEventLogParseStatus::Error("Failed to parse field type", + __FILE__, __LINE__); + field_type = conversion.value(); + } + + if (field_id > params.field_id) { + // We've passed all fields with ids less than or equal to what we are + // looking for. Reset pending_data_ to first field with id higher than + // params.field_id, since we didn't find the field we were looking for. + pending_data_ = field_start; + return RtcEventLogParseStatus::Success(); + } + + if (field_type == FieldType::kString) { + auto status = ParseStringFieldInternal(); + if (!status.ok()) { + return status; + } + } else { + auto status = ParseNumericFieldInternal(params.value_width, field_type); + if (!status.ok()) { + return status; + } + } + + if (field_id == params.field_id) { + // The field we're looking for has been found and values populated. + return RtcEventLogParseStatus::Success(); + } + } + + // Field not found because the event ended. + ClearTemporaries(); + return RtcEventLogParseStatus::Success(); +} + +RtcEventLogParseStatusOr> +EventParser::ParseStringField(const FieldParameters& params, + bool required_field) { + using StatusOr = RtcEventLogParseStatusOr>; + RTC_DCHECK_EQ(params.field_type, FieldType::kString); + auto status = ParseField(params); + if (!status.ok()) + return StatusOr(status); + rtc::ArrayView strings = GetStrings(); + if (required_field && strings.size() != NumEventsInBatch()) { + return StatusOr::Error("Required string field not found", __FILE__, + __LINE__); + } + return StatusOr(strings); +} + +RtcEventLogParseStatusOr> +EventParser::ParseNumericField(const FieldParameters& params, + bool required_field) { + using StatusOr = RtcEventLogParseStatusOr>; + RTC_DCHECK_NE(params.field_type, FieldType::kString); + auto status = ParseField(params); + if (!status.ok()) + return StatusOr(status); + rtc::ArrayView values = GetValues(); + if (required_field && values.size() != NumEventsInBatch()) { + return StatusOr::Error("Required numerical field not found", __FILE__, + __LINE__); + } + return StatusOr(values); +} + +RtcEventLogParseStatusOr +EventParser::ParseOptionalNumericField(const FieldParameters& params, + bool required_field) { + using StatusOr = RtcEventLogParseStatusOr; + RTC_DCHECK_NE(params.field_type, FieldType::kString); + auto status = ParseField(params); + if (!status.ok()) + return StatusOr(status); + ValueAndPostionView view{GetValues(), GetPositions()}; + if (required_field && view.positions.size() != NumEventsInBatch()) { + return StatusOr::Error("Required numerical field not found", __FILE__, + __LINE__); + } + return StatusOr(view); +} + +} // namespace webrtc diff --git a/logging/rtc_event_log/events/rtc_event_field_encoding_parser.h b/logging/rtc_event_log/events/rtc_event_field_encoding_parser.h new file mode 100644 index 0000000000..c33d4bee3a --- /dev/null +++ b/logging/rtc_event_log/events/rtc_event_field_encoding_parser.h @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_FIELD_ENCODING_PARSER_H_ +#define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_FIELD_ENCODING_PARSER_H_ + +#include +#include + +#include "logging/rtc_event_log/events/rtc_event_field_encoding.h" + +// TODO(terelius): Compared to a generic 'Status' class, this +// class allows us additional information about the context +// in which the error occurred. This is currently limited to +// the source location (file and line), but we plan on adding +// information about the event and field name being parsed. +// If/when we start using absl::Status in WebRTC, consider +// whether payloads would be an appropriate alternative. +class RtcEventLogParseStatus { + template + friend class RtcEventLogParseStatusOr; + + public: + static RtcEventLogParseStatus Success() { return RtcEventLogParseStatus(); } + static RtcEventLogParseStatus Error(std::string error, + std::string file, + int line) { + return RtcEventLogParseStatus(error, file, line); + } + + bool ok() const { return error_.empty(); } + ABSL_DEPRECATED("Use ok() instead") explicit operator bool() const { + return ok(); + } + + std::string message() const { return error_; } + + private: + RtcEventLogParseStatus() : error_() {} + RtcEventLogParseStatus(std::string error, std::string file, int line) + : error_(error + " (" + file + ": " + std::to_string(line) + ")") {} + + std::string error_; +}; + +template +class RtcEventLogParseStatusOr { + public: + RtcEventLogParseStatusOr(RtcEventLogParseStatus status) // NOLINT + : status_(status), value_() {} + RtcEventLogParseStatusOr(const T& value) // NOLINT + : status_(), value_(value) {} + + bool ok() const { return status_.ok(); } + + std::string message() const { return status_.message(); } + + RtcEventLogParseStatus status() const { return status_; } + + const T& value() const { + RTC_DCHECK(ok()); + return value_; + } + + T& value() { + RTC_DCHECK(ok()); + return value_; + } + + static RtcEventLogParseStatusOr Error(std::string error, + std::string file, + int line) { + return RtcEventLogParseStatusOr(error, file, line); + } + + private: + RtcEventLogParseStatusOr() : status_() {} + RtcEventLogParseStatusOr(std::string error, std::string file, int line) + : status_(error, file, line), value_() {} + + RtcEventLogParseStatus status_; + T value_; +}; + +namespace webrtc { + +class EventParser { + public: + struct ValueAndPostionView { + rtc::ArrayView values; + rtc::ArrayView positions; + }; + + EventParser() = default; + + // N.B: This method stores a abls::string_view into the string to be + // parsed. The caller is responsible for ensuring that the actual string + // remains unmodified and outlives the EventParser. + RtcEventLogParseStatus Initialize(absl::string_view s, bool batched); + + // Attempts to parse the field specified by `params`, skipping past + // other fields that may occur before it. If 'required_field == true', + // then failing to find the field is an error, otherwise the functions + // return success, but with an empty view of values. 
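+  //
+  // A minimal usage sketch (with a hypothetical event type `MyEvent`; the
+  // field-parameter names are illustrative, not part of this API):
+  //   EventParser parser;
+  //   RTC_CHECK(parser.Initialize(encoded_event, /*batched=*/true).ok());
+  //   auto counts = parser.ParseNumericField(MyEvent::count_params);
+  //   auto names = parser.ParseStringField(MyEvent::name_params);
+  // Fields must be requested in increasing field-ID order, and the views
+  // returned for one field are invalidated by the next Parse* call.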
+ RtcEventLogParseStatusOr> ParseStringField( + const FieldParameters& params, + bool required_field = true); + RtcEventLogParseStatusOr> ParseNumericField( + const FieldParameters& params, + bool required_field = true); + RtcEventLogParseStatusOr ParseOptionalNumericField( + const FieldParameters& params, + bool required_field = true); + + // Number of events in a batch. + uint64_t NumEventsInBatch() const { return num_events_; } + + // Bytes remaining in `pending_data_`. Assuming there are no unknown + // fields, BytesRemaining() should return 0 when all known fields + // in the event have been parsed. + size_t RemainingBytes() const { return pending_data_.size(); } + + private: + uint64_t ReadLittleEndian(uint8_t bytes); + uint64_t ReadVarInt(); + uint64_t ReadSingleValue(FieldType field_type); + uint64_t ReadOptionalValuePositions(); + void ReadDeltasAndPopulateValues(FixedLengthEncodingParametersV3 params, + uint64_t num_deltas, + uint64_t base); + RtcEventLogParseStatus ParseNumericFieldInternal(uint64_t value_bit_width, + FieldType field_type); + RtcEventLogParseStatus ParseStringFieldInternal(); + + // Attempts to parse the field specified by `params`, skipping past + // other fields that may occur before it. Returns + // RtcEventLogParseStatus::Success() and populates `values_` (and + // `positions_`) if the field is found. Returns + // RtcEventLogParseStatus::Success() and clears `values_` (and `positions_`) + // if the field doesn't exist. Returns a RtcEventLogParseStatus::Error() if + // the log is incomplete, malformed or otherwise can't be parsed. + RtcEventLogParseStatus ParseField(const FieldParameters& params); + + void SetError() { error_ = true; } + bool Ok() const { return !error_; } + + rtc::ArrayView GetValues() { return values_; } + rtc::ArrayView GetPositions() { return positions_; } + rtc::ArrayView GetStrings() { return strings_; } + + void ClearTemporaries() { + positions_.clear(); + values_.clear(); + strings_.clear(); + } + + // Tracks whether an error has occurred in one of the helper + // functions above. + bool error_ = false; + + // Temporary storage for result. + std::vector positions_; + std::vector values_; + std::vector strings_; + + // String to be consumed. + absl::string_view pending_data_; + uint64_t num_events_ = 1; + uint64_t last_field_id_ = FieldParameters::kTimestampField; +}; + +// Inverse of the ExtractRtcEventMember function used when parsing +// a log. Uses a vector of values to populate a specific field in a +// vector of structs. +template ::value, bool> = true> +ABSL_MUST_USE_RESULT RtcEventLogParseStatus +PopulateRtcEventMember(const rtc::ArrayView values, + T E::*member, + rtc::ArrayView output) { + size_t batch_size = values.size(); + RTC_CHECK_EQ(output.size(), batch_size); + for (size_t i = 0; i < batch_size; ++i) { + output[i].*member = DecodeFromUnsignedToType(values[i]); + } + return RtcEventLogParseStatus::Success(); +} + +// Same as above, but for optional fields. 
+template ::value, bool> = true> +ABSL_MUST_USE_RESULT RtcEventLogParseStatus +PopulateRtcEventMember(const rtc::ArrayView positions, + const rtc::ArrayView values, + absl::optional E::*member, + rtc::ArrayView output) { + size_t batch_size = positions.size(); + RTC_CHECK_EQ(output.size(), batch_size); + RTC_CHECK_LE(values.size(), batch_size); + auto value_it = values.begin(); + for (size_t i = 0; i < batch_size; ++i) { + if (positions[i]) { + RTC_CHECK(value_it != values.end()); + output[i].*member = DecodeFromUnsignedToType(value_it); + ++value_it; + } else { + output[i].*member = absl::nullopt; + } + } + RTC_CHECK(value_it == values.end()); + return RtcEventLogParseStatus::Success(); +} + +// Same as above, but for enum fields. +template ::value, bool> = true> +ABSL_MUST_USE_RESULT RtcEventLogParseStatus +PopulateRtcEventMember(const rtc::ArrayView values, + T E::*member, + rtc::ArrayView output) { + size_t batch_size = values.size(); + RTC_CHECK_EQ(output.size(), batch_size); + for (size_t i = 0; i < batch_size; ++i) { + auto result = RtcEventLogEnum::Decode(values[i]); + if (!result.ok()) { + return result.status(); + } + output[i].*member = result.value(); + } + return RtcEventLogParseStatus::Success(); +} + +// Same as above, but for string fields. +template +ABSL_MUST_USE_RESULT RtcEventLogParseStatus +PopulateRtcEventMember(const rtc::ArrayView values, + std::string E::*member, + rtc::ArrayView output) { + size_t batch_size = values.size(); + RTC_CHECK_EQ(output.size(), batch_size); + for (size_t i = 0; i < batch_size; ++i) { + output[i].*member = values[i]; + } + return RtcEventLogParseStatus::Success(); +} + +// Same as above, but for Timestamp fields. +// N.B. Assumes that the encoded value uses millisecond precision. +template +ABSL_MUST_USE_RESULT RtcEventLogParseStatus +PopulateRtcEventTimestamp(const rtc::ArrayView& values, + Timestamp E::*timestamp, + rtc::ArrayView output) { + size_t batch_size = values.size(); + RTC_CHECK_EQ(batch_size, output.size()); + for (size_t i = 0; i < batch_size; ++i) { + output[i].*timestamp = + Timestamp::Millis(DecodeFromUnsignedToType(values[i])); + } + return RtcEventLogParseStatus::Success(); +} + +template +rtc::ArrayView ExtendLoggedBatch(std::vector& output, + size_t new_elements) { + size_t old_size = output.size(); + output.insert(output.end(), old_size + new_elements, E()); + rtc::ArrayView output_batch = output; + output_batch.subview(old_size); + RTC_DCHECK_EQ(output_batch.size(), new_elements); + return output_batch; +} + +} // namespace webrtc +#endif // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_FIELD_ENCODING_PARSER_H_ diff --git a/logging/rtc_event_log/events/rtc_event_field_encoding_unittest.cc b/logging/rtc_event_log/events/rtc_event_field_encoding_unittest.cc new file mode 100644 index 0000000000..b1554694ad --- /dev/null +++ b/logging/rtc_event_log/events/rtc_event_field_encoding_unittest.cc @@ -0,0 +1,885 @@ +/* Copyright (c) 2021 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "logging/rtc_event_log/events/rtc_event_field_encoding.h" + +#include +#include +#include + +#include "api/rtc_event_log/rtc_event.h" +#include "logging/rtc_event_log/encoder/var_int.h" +#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h" +#include "test/gtest.h" + +namespace webrtc { + +namespace { +constexpr int32_t kInt32Max = std::numeric_limits::max(); +constexpr int32_t kInt32Min = std::numeric_limits::min(); +constexpr uint32_t kUint32Max = std::numeric_limits::max(); +constexpr int64_t kInt64Max = std::numeric_limits::max(); +constexpr int64_t kInt64Min = std::numeric_limits::min(); +constexpr uint64_t kUint64Max = std::numeric_limits::max(); + +template ::value, bool> = true> +size_t ExpectedVarIntSize(T value) { + size_t bytes = 0; + uint64_t x = EncodeAsUnsigned(value); + do { + ++bytes; + x = x >> 7; + } while (x > 0); + return bytes; +} + +template ::value, bool> = true> +size_t ExpectedBaseValueSize(const FieldParameters& params, T value) { + switch (params.field_type) { + case FieldType::kFixed8: + return 1; + case FieldType::kFixed32: + return 4; + case FieldType::kFixed64: + return 8; + case FieldType::kVarInt: + return ExpectedVarIntSize(value); + default: + break; + } + RTC_DCHECK_NOTREACHED(); + return 0; +} + +template ::value, bool> = true> +size_t ExpectedEncodingSize(const FieldParameters& params, + const std::vector& v, + size_t expected_bits_per_delta) { + if (v.size() == 0) + return 0; + + uint64_t numeric_field_type = static_cast(params.field_type); + RTC_DCHECK_LT(numeric_field_type, 1u << 3); + size_t tag_size = + ExpectedVarIntSize((params.field_id << 3) + numeric_field_type); + T base = v[0]; + size_t base_size = ExpectedBaseValueSize(params, base); + if (v.size() == 1) + return tag_size + base_size; + + size_t delta_header_size = 1; + // Check if there is an element *not* equal to base. + if (std::all_of(v.begin(), v.end(), [base](T x) { return x == base; })) { + return tag_size + base_size + delta_header_size; + } + + size_t delta_size = ((v.size() - 1) * expected_bits_per_delta + 7) / 8; + return tag_size + base_size + delta_header_size + delta_size; +} + +template ::value, bool> = true> +size_t ExpectedEncodingSize(const FieldParameters& params, + const std::vector>& v, + size_t expected_bits_per_delta) { + size_t num_existing_values = + v.size() - std::count(v.begin(), v.end(), absl::nullopt); + auto first_existing_value = std::find_if( + v.begin(), v.end(), [](absl::optional x) { return x.has_value(); }); + if (num_existing_values == 0) + return 0; + + uint64_t numeric_field_type = static_cast(params.field_type); + RTC_DCHECK_LT(numeric_field_type, 1u << 3); + size_t tag_size = + ExpectedVarIntSize((params.field_id << 3) + numeric_field_type); + T base = first_existing_value->value(); + size_t base_size = ExpectedBaseValueSize(params, base); + if (num_existing_values == 1 && v.size() == 1) + return tag_size + base_size; + + size_t delta_header_size = (num_existing_values == v.size() ? 1 : 2); + size_t positions_size = + (num_existing_values == v.size() ? 0 : (v.size() + 7) / 8); + // Check if there is an element *not* equal to base. 
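+  // (If every present value equals the base, only the tag, base, delta
+  // header and position bitmap are stored; the delta block is omitted.)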
+ if (std::all_of(v.begin(), v.end(), + [base](absl::optional x) { return x == base; })) { + return tag_size + base_size + delta_header_size + positions_size; + } + + size_t delta_size = + ((num_existing_values - 1) * expected_bits_per_delta + 7) / 8; + return tag_size + base_size + delta_header_size + positions_size + delta_size; +} + +size_t ExpectedStringEncodingSize(const FieldParameters& params, + const std::vector& values) { + EXPECT_EQ(params.field_type, FieldType::kString); + uint64_t numeric_field_type = static_cast(params.field_type); + RTC_DCHECK_LT(numeric_field_type, 1u << 3); + size_t tag_size = + ExpectedVarIntSize((params.field_id << 3) + numeric_field_type); + + size_t expected_size = tag_size; + if (values.size() > 1) { + // VarInt encoding header reserved for future use. Currently always 0. + expected_size += 1; + } + for (const auto& s : values) { + expected_size += ExpectedVarIntSize(s.size()); + expected_size += s.size(); + } + return expected_size; +} + +} // namespace + +class RtcTestEvent final : public RtcEvent { + public: + RtcTestEvent(bool b, + int32_t signed32, + uint32_t unsigned32, + int64_t signed64, + uint64_t unsigned64) + : b_(b), + signed32_(signed32), + unsigned32_(unsigned32), + signed64_(signed64), + unsigned64_(unsigned64) {} + RtcTestEvent(bool b, + int32_t signed32, + uint32_t unsigned32, + int64_t signed64, + uint64_t unsigned64, + absl::optional optional_signed32, + absl::optional optional_signed64, + uint32_t wrapping21, + std::string string) + : b_(b), + signed32_(signed32), + unsigned32_(unsigned32), + signed64_(signed64), + unsigned64_(unsigned64), + optional_signed32_(optional_signed32), + optional_signed64_(optional_signed64), + wrapping21_(wrapping21), + string_(string) {} + ~RtcTestEvent() override = default; + + Type GetType() const override { return static_cast(4711); } + bool IsConfigEvent() const override { return false; } + + static constexpr EventParameters event_params{ + "TestEvent", static_cast(4711)}; + static constexpr FieldParameters timestamp_params{ + "timestamp_ms", FieldParameters::kTimestampField, FieldType::kVarInt, 64}; + static constexpr FieldParameters bool_params{"b", 2, FieldType::kFixed8, 1}; + static constexpr FieldParameters signed32_params{"signed32", 3, + FieldType::kVarInt, 32}; + static constexpr FieldParameters unsigned32_params{"unsigned32", 4, + FieldType::kFixed32, 32}; + static constexpr FieldParameters signed64_params{"signed64", 5, + FieldType::kFixed64, 64}; + static constexpr FieldParameters unsigned64_params{"unsigned64", 6, + FieldType::kVarInt, 64}; + static constexpr FieldParameters optional32_params{"optional_signed32", 7, + FieldType::kFixed32, 32}; + static constexpr FieldParameters optional64_params{"optional_signed64", 8, + FieldType::kVarInt, 64}; + static constexpr FieldParameters wrapping21_params{"wrapping21", 9, + FieldType::kFixed32, 21}; + static constexpr FieldParameters string_params{ + "string", 10, FieldType::kString, /*value_width = */ 0}; + + static constexpr Type kType = static_cast(4711); + + const bool b_; + const int32_t signed32_; + const uint32_t unsigned32_; + const int64_t signed64_; + const uint64_t unsigned64_; + const absl::optional optional_signed32_ = absl::nullopt; + const absl::optional optional_signed64_ = absl::nullopt; + const uint32_t wrapping21_ = 0; + const std::string string_; +}; + +constexpr EventParameters RtcTestEvent::event_params; +constexpr FieldParameters RtcTestEvent::timestamp_params; +constexpr FieldParameters RtcTestEvent::bool_params; 
+constexpr FieldParameters RtcTestEvent::signed32_params; +constexpr FieldParameters RtcTestEvent::unsigned32_params; +constexpr FieldParameters RtcTestEvent::signed64_params; +constexpr FieldParameters RtcTestEvent::unsigned64_params; + +constexpr FieldParameters RtcTestEvent::optional32_params; +constexpr FieldParameters RtcTestEvent::optional64_params; +constexpr FieldParameters RtcTestEvent::wrapping21_params; +constexpr FieldParameters RtcTestEvent::string_params; + +constexpr RtcEvent::Type RtcTestEvent::kType; + +class RtcEventFieldTest : public ::testing::Test { + protected: + void SetUp() override {} + + void CreateFullEvents( + const std::vector& bool_values, + const std::vector& signed32_values, + const std::vector& unsigned32_values, + const std::vector& signed64_values, + const std::vector& unsigned64_values, + const std::vector>& optional32_values, + const std::vector>& optional64_values, + const std::vector& wrapping21_values, + const std::vector& string_values) { + size_t size = bool_values.size(); + RTC_CHECK_EQ(signed32_values.size(), size); + RTC_CHECK_EQ(unsigned32_values.size(), size); + RTC_CHECK_EQ(signed64_values.size(), size); + RTC_CHECK_EQ(unsigned64_values.size(), size); + RTC_CHECK_EQ(optional32_values.size(), size); + RTC_CHECK_EQ(optional64_values.size(), size); + RTC_CHECK_EQ(wrapping21_values.size(), size); + RTC_CHECK_EQ(string_values.size(), size); + + for (size_t i = 0; i < size; i++) { + batch_.push_back(new RtcTestEvent( + bool_values[i], signed32_values[i], unsigned32_values[i], + signed64_values[i], unsigned64_values[i], optional32_values[i], + optional64_values[i], wrapping21_values[i], string_values[i])); + } + } + + void PrintBytes(const std::string& s) { + for (auto c : s) { + fprintf(stderr, "%d ", static_cast(c)); + } + fprintf(stderr, "\n"); + } + + void ParseEventHeader(absl::string_view encoded_event) { + uint64_t event_tag; + bool success; + std::tie(success, encoded_event) = DecodeVarInt(encoded_event, &event_tag); + ASSERT_TRUE(success); + uint64_t event_id = event_tag >> 1; + ASSERT_EQ(event_id, static_cast(RtcTestEvent::event_params.id)); + bool batched = event_tag & 1u; + ASSERT_EQ(batched, batch_.size() > 1u); + + uint64_t size; + std::tie(success, encoded_event) = DecodeVarInt(encoded_event, &size); + ASSERT_EQ(encoded_event.size(), size); + + ASSERT_TRUE(parser_.Initialize(encoded_event, batched).ok()); + } + + void ParseAndVerifyTimestamps() { + auto result = parser_.ParseNumericField(RtcTestEvent::timestamp_params); + ASSERT_TRUE(result.ok()) << result.message().c_str(); + ASSERT_EQ(result.value().size(), batch_.size()); + for (size_t i = 0; i < batch_.size(); i++) { + EXPECT_EQ(result.value()[i], + static_cast(batch_[i]->timestamp_ms())); + } + } + + void ParseAndVerifyStringField( + const FieldParameters& params, + const std::vector& expected_values, + size_t expected_skipped_bytes = 0) { + size_t expected_size = ExpectedStringEncodingSize(params, expected_values) + + expected_skipped_bytes; + size_t size_before = parser_.RemainingBytes(); + auto result = parser_.ParseStringField(params); + ASSERT_TRUE(result.ok()) << result.message().c_str(); + ASSERT_EQ(result.value().size(), expected_values.size()); + for (size_t i = 0; i < expected_values.size(); i++) { + EXPECT_EQ(result.value()[i], expected_values[i]); + } + size_t size_after = parser_.RemainingBytes(); + EXPECT_EQ(size_before - size_after, expected_size) + << " for field " << params.name; + } + + template + void ParseAndVerifyField(const FieldParameters& params, + const 
std::vector& expected_values, + size_t expected_bits_per_delta, + size_t expected_skipped_bytes = 0) { + size_t expected_size = + ExpectedEncodingSize(params, expected_values, expected_bits_per_delta) + + expected_skipped_bytes; + size_t size_before = parser_.RemainingBytes(); + auto result = parser_.ParseNumericField(params); + ASSERT_TRUE(result.ok()) << result.message().c_str(); + ASSERT_EQ(result.value().size(), expected_values.size()); + for (size_t i = 0; i < expected_values.size(); i++) { + EXPECT_EQ(DecodeFromUnsignedToType(result.value()[i]), + expected_values[i]); + } + size_t size_after = parser_.RemainingBytes(); + EXPECT_EQ(size_before - size_after, expected_size) + << " for field " << params.name; + } + + template + void ParseAndVerifyOptionalField( + const FieldParameters& params, + const std::vector>& expected_values, + size_t expected_bits_per_delta, + size_t expected_skipped_bytes = 0) { + size_t expected_size = + ExpectedEncodingSize(params, expected_values, expected_bits_per_delta) + + expected_skipped_bytes; + size_t size_before = parser_.RemainingBytes(); + auto result = parser_.ParseOptionalNumericField(params); + ASSERT_TRUE(result.ok()) << result.message().c_str(); + rtc::ArrayView values = result.value().values; + rtc::ArrayView positions = result.value().positions; + ASSERT_EQ(positions.size(), expected_values.size()); + auto value_it = values.begin(); + for (size_t i = 0; i < expected_values.size(); i++) { + if (positions[i]) { + ASSERT_NE(value_it, values.end()); + ASSERT_TRUE(expected_values[i].has_value()); + EXPECT_EQ(DecodeFromUnsignedToType(*value_it), + expected_values[i].value()); + ++value_it; + } else { + EXPECT_EQ(absl::nullopt, expected_values[i]); + } + } + EXPECT_EQ(value_it, values.end()); + size_t size_after = parser_.RemainingBytes(); + EXPECT_EQ(size_before - size_after, expected_size); + } + + void ParseAndVerifyMissingField(const FieldParameters& params) { + auto result = parser_.ParseNumericField(params, /*required_field=*/false); + ASSERT_TRUE(result.ok()) << result.message().c_str(); + EXPECT_EQ(result.value().size(), 0u); + } + + void ParseAndVerifyMissingOptionalField(const FieldParameters& params) { + auto result = + parser_.ParseOptionalNumericField(params, /*required_field=*/false); + ASSERT_TRUE(result.ok()) << result.message().c_str(); + rtc::ArrayView values = result.value().values; + rtc::ArrayView positions = result.value().positions; + EXPECT_EQ(positions.size(), 0u); + EXPECT_EQ(values.size(), 0u); + } + + void TearDown() override { + for (const RtcEvent* event : batch_) { + delete event; + } + } + + std::vector batch_; + EventParser parser_; +}; + +TEST_F(RtcEventFieldTest, EmptyList) { + EventEncoder encoder(RtcTestEvent::event_params, batch_); + encoder.EncodeField(RtcTestEvent::bool_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::b_)); + std::string s = encoder.AsString(); + EXPECT_TRUE(s.empty()); +} + +TEST_F(RtcEventFieldTest, Singleton) { + std::vector bool_values = {true}; + std::vector signed32_values = {-2}; + std::vector unsigned32_values = {123456789}; + std::vector signed64_values = {-9876543210}; + std::vector unsigned64_values = {9876543210}; + std::vector> optional32_values = {kInt32Min}; + std::vector> optional64_values = {kInt64Max}; + std::vector wrapping21_values = {(1 << 21) - 1}; + std::vector string_values = {"foo"}; + + CreateFullEvents(bool_values, signed32_values, unsigned32_values, + signed64_values, unsigned64_values, optional32_values, + optional64_values, wrapping21_values, string_values); 
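+  // This test, like the others below, checks a full round trip: encode all
+  // fields of the batch, serialize to a string, then parse the string and
+  // verify each field against the original values.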
+ + EventEncoder encoder(RtcTestEvent::event_params, batch_); + encoder.EncodeField(RtcTestEvent::bool_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::b_)); + encoder.EncodeField(RtcTestEvent::signed32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed32_)); + encoder.EncodeField( + RtcTestEvent::unsigned32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::unsigned32_)); + encoder.EncodeField(RtcTestEvent::signed64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed64_)); + encoder.EncodeField( + RtcTestEvent::unsigned64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::unsigned64_)); + encoder.EncodeField( + RtcTestEvent::optional32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed32_)); + encoder.EncodeField( + RtcTestEvent::optional64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed64_)); + encoder.EncodeField( + RtcTestEvent::wrapping21_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::wrapping21_)); + encoder.EncodeField(RtcTestEvent::string_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::string_)); + std::string s = encoder.AsString(); + + // Optional debug printing + // PrintBytes(s); + + ParseEventHeader(s); + ParseAndVerifyTimestamps(); + ParseAndVerifyField(RtcTestEvent::bool_params, bool_values, + /*no deltas*/ 0); + ParseAndVerifyField(RtcTestEvent::signed32_params, signed32_values, + /*no deltas*/ 0); + ParseAndVerifyField(RtcTestEvent::unsigned32_params, unsigned32_values, + /*no deltas*/ 0); + ParseAndVerifyField(RtcTestEvent::signed64_params, signed64_values, + /*no deltas*/ 0); + ParseAndVerifyField(RtcTestEvent::unsigned64_params, unsigned64_values, + /*no deltas*/ 0); + ParseAndVerifyOptionalField(RtcTestEvent::optional32_params, + optional32_values, /*no deltas*/ 0); + ParseAndVerifyOptionalField(RtcTestEvent::optional64_params, + optional64_values, /*no deltas*/ 0); + ParseAndVerifyField(RtcTestEvent::wrapping21_params, wrapping21_values, + /*no deltas*/ 0); + ParseAndVerifyStringField(RtcTestEvent::string_params, string_values); + EXPECT_EQ(parser_.RemainingBytes(), 0u); +} + +TEST_F(RtcEventFieldTest, EqualElements) { + std::vector bool_values = {true, true, true, true}; + std::vector signed32_values = {-2, -2, -2, -2}; + std::vector unsigned32_values = {123456789, 123456789, 123456789, + 123456789}; + std::vector signed64_values = {-9876543210, -9876543210, -9876543210, + -9876543210}; + std::vector unsigned64_values = {9876543210, 9876543210, 9876543210, + 9876543210}; + std::vector> optional32_values = { + kInt32Min, kInt32Min, kInt32Min, kInt32Min}; + std::vector> optional64_values = { + kInt64Max, kInt64Max, kInt64Max, kInt64Max}; + std::vector wrapping21_values = {(1 << 21) - 1, (1 << 21) - 1, + (1 << 21) - 1, (1 << 21) - 1}; + std::vector string_values = {"foo", "foo", "foo", "foo"}; + + CreateFullEvents(bool_values, signed32_values, unsigned32_values, + signed64_values, unsigned64_values, optional32_values, + optional64_values, wrapping21_values, string_values); + EventEncoder encoder(RtcTestEvent::event_params, batch_); + encoder.EncodeField(RtcTestEvent::bool_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::b_)); + encoder.EncodeField(RtcTestEvent::signed32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed32_)); + encoder.EncodeField( + RtcTestEvent::unsigned32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::unsigned32_)); + encoder.EncodeField(RtcTestEvent::signed64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed64_)); + 
encoder.EncodeField( + RtcTestEvent::unsigned64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::unsigned64_)); + encoder.EncodeField( + RtcTestEvent::optional32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed32_)); + encoder.EncodeField( + RtcTestEvent::optional64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed64_)); + encoder.EncodeField( + RtcTestEvent::wrapping21_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::wrapping21_)); + encoder.EncodeField(RtcTestEvent::string_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::string_)); + std::string s = encoder.AsString(); + + // Optional debug printing + // PrintBytes(s); + + ParseEventHeader(s); + ParseAndVerifyTimestamps(); + ParseAndVerifyField(RtcTestEvent::bool_params, bool_values, + /*no deltas*/ 0); + ParseAndVerifyField(RtcTestEvent::signed32_params, signed32_values, + /*no deltas*/ 0); + ParseAndVerifyField(RtcTestEvent::unsigned32_params, unsigned32_values, + /*no deltas*/ 0); + ParseAndVerifyField(RtcTestEvent::signed64_params, signed64_values, + /*no deltas*/ 0); + ParseAndVerifyField(RtcTestEvent::unsigned64_params, unsigned64_values, + /*no deltas*/ 0); + ParseAndVerifyOptionalField(RtcTestEvent::optional32_params, + optional32_values, /*no deltas*/ 0); + ParseAndVerifyOptionalField(RtcTestEvent::optional64_params, + optional64_values, /*no deltas*/ 0); + ParseAndVerifyField(RtcTestEvent::wrapping21_params, wrapping21_values, + /*no deltas*/ 0); + ParseAndVerifyStringField(RtcTestEvent::string_params, string_values); + EXPECT_EQ(parser_.RemainingBytes(), 0u); +} + +TEST_F(RtcEventFieldTest, Increasing) { + std::vector bool_values = {false, true, false, true}; + std::vector signed32_values = {-2, -1, 0, 1}; + std::vector unsigned32_values = {kUint32Max - 1, kUint32Max, 0, 1}; + std::vector signed64_values = {kInt64Max - 1, kInt64Max, kInt64Min, + kInt64Min + 1}; + std::vector unsigned64_values = {kUint64Max - 1, kUint64Max, 0, 1}; + std::vector> optional32_values = { + kInt32Max - 1, kInt32Max, kInt32Min, kInt32Min + 1}; + std::vector> optional64_values = { + kInt64Max - 1, kInt64Max, kInt64Min, kInt64Min + 1}; + std::vector wrapping21_values = {(1 << 21) - 2, (1 << 21) - 1, 0, + 1}; + std::vector string_values = { + "", "a", "bc", "def"}; // No special compression of strings. 
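+  // Adjacent values differ by exactly 1 (modulo each field's bit width), so
+  // the encoder should need a single bit per delta, as asserted below.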
+ + CreateFullEvents(bool_values, signed32_values, unsigned32_values, + signed64_values, unsigned64_values, optional32_values, + optional64_values, wrapping21_values, string_values); + + EventEncoder encoder(RtcTestEvent::event_params, batch_); + encoder.EncodeField(RtcTestEvent::bool_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::b_)); + encoder.EncodeField(RtcTestEvent::signed32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed32_)); + encoder.EncodeField( + RtcTestEvent::unsigned32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::unsigned32_)); + encoder.EncodeField(RtcTestEvent::signed64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed64_)); + encoder.EncodeField( + RtcTestEvent::unsigned64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::unsigned64_)); + encoder.EncodeField( + RtcTestEvent::optional32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed32_)); + encoder.EncodeField( + RtcTestEvent::optional64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed64_)); + encoder.EncodeField( + RtcTestEvent::wrapping21_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::wrapping21_)); + encoder.EncodeField(RtcTestEvent::string_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::string_)); + std::string s = encoder.AsString(); + + // Optional debug printing + // PrintBytes(s); + + ParseEventHeader(s); + ParseAndVerifyTimestamps(); + ParseAndVerifyField(RtcTestEvent::bool_params, bool_values, + /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::signed32_params, signed32_values, + /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::unsigned32_params, unsigned32_values, + /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::signed64_params, signed64_values, + /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::unsigned64_params, unsigned64_values, + /*delta bits*/ 1); + ParseAndVerifyOptionalField(RtcTestEvent::optional32_params, + optional32_values, /*delta bits*/ 1); + ParseAndVerifyOptionalField(RtcTestEvent::optional64_params, + optional64_values, /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::wrapping21_params, wrapping21_values, + /*delta bits*/ 1); + ParseAndVerifyStringField(RtcTestEvent::string_params, string_values); + EXPECT_EQ(parser_.RemainingBytes(), 0u); +} + +TEST_F(RtcEventFieldTest, Decreasing) { + std::vector bool_values = {true, false, true, false}; + std::vector signed32_values = {2, 1, 0, -1}; + std::vector unsigned32_values = {1, 0, kUint32Max, kUint32Max - 1}; + std::vector signed64_values = {kInt64Min + 1, kInt64Min, kInt64Max, + kInt64Max - 1}; + std::vector unsigned64_values = {1, 0, kUint64Max, kUint64Max - 1}; + std::vector> optional32_values = { + kInt32Min + 1, kInt32Min, kInt32Max, kInt32Max - 1}; + std::vector> optional64_values = { + kInt64Min + 1, kInt64Min, kInt64Max, kInt64Max - 1}; + std::vector wrapping21_values = {1, 0, (1 << 21) - 1, + (1 << 21) - 2}; + std::vector string_values = { + "def", "bc", "a", ""}; // No special compression of strings. 
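+  // Decreasing runs exercise signed deltas: a step of -1 wraps around to
+  // the all-ones pattern under the field's bit mask, which still fits in
+  // one signed delta bit, as asserted below.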
+ + CreateFullEvents(bool_values, signed32_values, unsigned32_values, + signed64_values, unsigned64_values, optional32_values, + optional64_values, wrapping21_values, string_values); + + EventEncoder encoder(RtcTestEvent::event_params, batch_); + encoder.EncodeField(RtcTestEvent::bool_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::b_)); + encoder.EncodeField(RtcTestEvent::signed32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed32_)); + encoder.EncodeField( + RtcTestEvent::unsigned32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::unsigned32_)); + encoder.EncodeField(RtcTestEvent::signed64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed64_)); + encoder.EncodeField( + RtcTestEvent::unsigned64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::unsigned64_)); + encoder.EncodeField( + RtcTestEvent::optional32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed32_)); + encoder.EncodeField( + RtcTestEvent::optional64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed64_)); + encoder.EncodeField( + RtcTestEvent::wrapping21_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::wrapping21_)); + encoder.EncodeField(RtcTestEvent::string_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::string_)); + std::string s = encoder.AsString(); + + // Optional debug printing + // PrintBytes(s); + + ParseEventHeader(s); + ParseAndVerifyTimestamps(); + ParseAndVerifyField(RtcTestEvent::bool_params, bool_values, + /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::signed32_params, signed32_values, + /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::unsigned32_params, unsigned32_values, + /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::signed64_params, signed64_values, + /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::unsigned64_params, unsigned64_values, + /*delta bits*/ 1); + ParseAndVerifyOptionalField(RtcTestEvent::optional32_params, + optional32_values, /*delta bits*/ 1); + ParseAndVerifyOptionalField(RtcTestEvent::optional64_params, + optional64_values, /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::wrapping21_params, wrapping21_values, + /*delta bits*/ 1); + ParseAndVerifyStringField(RtcTestEvent::string_params, string_values); + EXPECT_EQ(parser_.RemainingBytes(), 0u); +} + +TEST_F(RtcEventFieldTest, SkipsDeprecatedFields) { + // Expect parser to skip fields it doesn't recognize, but find subsequent + // fields. 
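+  // This corresponds to forward compatibility: a parser can step over a
+  // field it doesn't know because every field carries its own tag and is
+  // self-delimiting. The *_encoding_size constants below follow the
+  // tag/base/header/delta layout used by the encoder.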
+ std::vector bool_values = {true, false}; + std::vector signed32_values = {kInt32Min / 2, kInt32Max / 2}; + std::vector unsigned32_values = {0, kUint32Max / 2}; + std::vector signed64_values = {kInt64Min / 2, kInt64Max / 2}; + std::vector unsigned64_values = {0, kUint64Max / 2}; + std::vector> optional32_values = {kInt32Max / 2, + kInt32Min / 2}; + std::vector> optional64_values = {kInt64Min / 2, + kInt64Max / 2}; + std::vector wrapping21_values = {0, 1 << 20}; + std::vector string_values = {"foo", "bar"}; + + size_t signed32_encoding_size = + /*tag*/ 1 + /* varint base*/ 5 + /* delta_header*/ 1 + /*deltas*/ 4; + size_t signed64_encoding_size = + /*tag*/ 1 + /* fixed64 base*/ 8 + /* delta_header*/ 1 + /*deltas*/ 8; + size_t optional32_encoding_size = + /*tag*/ 1 + /* fixed32 base*/ 4 + /* delta_header*/ 1 + /*deltas*/ 4; + + CreateFullEvents(bool_values, signed32_values, unsigned32_values, + signed64_values, unsigned64_values, optional32_values, + optional64_values, wrapping21_values, string_values); + + EventEncoder encoder(RtcTestEvent::event_params, batch_); + encoder.EncodeField(RtcTestEvent::bool_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::b_)); + encoder.EncodeField(RtcTestEvent::signed32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed32_)); + encoder.EncodeField( + RtcTestEvent::unsigned32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::unsigned32_)); + encoder.EncodeField(RtcTestEvent::signed64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed64_)); + encoder.EncodeField( + RtcTestEvent::unsigned64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::unsigned64_)); + encoder.EncodeField( + RtcTestEvent::optional32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed32_)); + encoder.EncodeField( + RtcTestEvent::optional64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed64_)); + encoder.EncodeField( + RtcTestEvent::wrapping21_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::wrapping21_)); + encoder.EncodeField(RtcTestEvent::string_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::string_)); + std::string s = encoder.AsString(); + + // Optional debug printing + // PrintBytes(s); + + ParseEventHeader(s); + ParseAndVerifyTimestamps(); + ParseAndVerifyField(RtcTestEvent::bool_params, bool_values, + /*delta_bits=*/1); + // Skips parsing the `signed32_values`. The following unsigned fields should + // still be found. + ParseAndVerifyField(RtcTestEvent::unsigned32_params, unsigned32_values, + /*delta_bits=*/31, + /*expected_skipped_bytes=*/signed32_encoding_size); + // Skips parsing the `signed64_values`. The following unsigned fields should + // still be found. + ParseAndVerifyField(RtcTestEvent::unsigned64_params, unsigned64_values, + /*delta_bits=*/63, signed64_encoding_size); + // Skips parsing the `optional32_values`. The following unsigned fields should + // still be found. + ParseAndVerifyOptionalField(RtcTestEvent::optional64_params, + optional64_values, + /*delta_bits=*/63, optional32_encoding_size); + ParseAndVerifyField(RtcTestEvent::wrapping21_params, wrapping21_values, + /*delta_bits=*/20); + ParseAndVerifyStringField(RtcTestEvent::string_params, string_values); + EXPECT_EQ(parser_.RemainingBytes(), 0u); +} + +TEST_F(RtcEventFieldTest, SkipsMissingFields) { + // Expect parsing of missing field to succeed but return an empty list. 
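+  // This is the backward-compatibility direction: asking (with
+  // required_field=false) for a field that the encoder never wrote yields
+  // an empty result rather than a parse error.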
+ + std::vector bool_values = {true, false}; + std::vector signed32_values = {kInt32Min / 2, kInt32Max / 2}; + std::vector unsigned32_values = {0, kUint32Max / 2}; + std::vector signed64_values = {kInt64Min / 2, kInt64Max / 2}; + std::vector unsigned64_values = {0, kUint64Max / 2}; + std::vector> optional32_values = {kInt32Max / 2, + kInt32Min / 2}; + std::vector> optional64_values = {kInt64Min / 2, + kInt64Max / 2}; + std::vector wrapping21_values = {0, 1 << 20}; + std::vector string_values = {"foo", "foo"}; + + CreateFullEvents(bool_values, signed32_values, unsigned32_values, + signed64_values, unsigned64_values, optional32_values, + optional64_values, wrapping21_values, string_values); + + EventEncoder encoder(RtcTestEvent::event_params, batch_); + // Skip encoding the `bool_values`. + encoder.EncodeField(RtcTestEvent::signed32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed32_)); + // Skip encoding the `unsigned32_values`. + encoder.EncodeField(RtcTestEvent::signed64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::signed64_)); + // Skip encoding the `unsigned64_values`. + encoder.EncodeField( + RtcTestEvent::optional32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed32_)); + // Skip encoding the `optional64_values`. + encoder.EncodeField( + RtcTestEvent::wrapping21_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::wrapping21_)); + encoder.EncodeField(RtcTestEvent::string_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::string_)); + std::string s = encoder.AsString(); + + // Optional debug printing + // PrintBytes(s); + + ParseEventHeader(s); + ParseAndVerifyTimestamps(); + ParseAndVerifyMissingField(RtcTestEvent::bool_params); + ParseAndVerifyField(RtcTestEvent::signed32_params, signed32_values, + /*delta_bits=*/31); + ParseAndVerifyMissingField(RtcTestEvent::unsigned32_params); + ParseAndVerifyField(RtcTestEvent::signed64_params, signed64_values, + /*delta_bits=*/63); + ParseAndVerifyMissingField(RtcTestEvent::unsigned64_params); + ParseAndVerifyOptionalField(RtcTestEvent::optional32_params, + optional32_values, /*delta_bits=*/31); + ParseAndVerifyMissingOptionalField(RtcTestEvent::optional64_params); + ParseAndVerifyField(RtcTestEvent::wrapping21_params, wrapping21_values, + /*delta_bits=*/20); + ParseAndVerifyStringField(RtcTestEvent::string_params, string_values); + EXPECT_EQ(parser_.RemainingBytes(), 0u); +} + +TEST_F(RtcEventFieldTest, OptionalFields) { + std::vector> optional32_values = { + 2, absl::nullopt, 4, absl::nullopt, 6, absl::nullopt}; + std::vector> optional64_values = { + absl::nullopt, 1024, absl::nullopt, 1025, absl::nullopt, 1026}; + std::vector wrapping21_values = {(1 << 21) - 3, 0, 2, 5, 5, 6}; + + for (size_t i = 0; i < optional32_values.size(); i++) { + batch_.push_back(new RtcTestEvent(0, 0, 0, 0, 0, optional32_values[i], + optional64_values[i], + wrapping21_values[i], "")); + } + + EventEncoder encoder(RtcTestEvent::event_params, batch_); + encoder.EncodeField( + RtcTestEvent::optional32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed32_)); + encoder.EncodeField( + RtcTestEvent::optional64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed64_)); + encoder.EncodeField( + RtcTestEvent::wrapping21_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::wrapping21_)); + std::string s = encoder.AsString(); + + // Optional debug output + // PrintBytes(s); + + ParseEventHeader(s); + ParseAndVerifyTimestamps(); + ParseAndVerifyOptionalField(RtcTestEvent::optional32_params, 
+ optional32_values, /*delta bits*/ 2); + ParseAndVerifyOptionalField(RtcTestEvent::optional64_params, + optional64_values, /*delta bits*/ 1); + ParseAndVerifyField(RtcTestEvent::wrapping21_params, wrapping21_values, + /*delta bits*/ 2); + EXPECT_EQ(parser_.RemainingBytes(), 0u); +} + +TEST_F(RtcEventFieldTest, AllNulloptTreatedAsMissing) { + std::vector> optional32_values = { + absl::nullopt, absl::nullopt, absl::nullopt, + absl::nullopt, absl::nullopt, absl::nullopt}; + std::vector> optional64_values = { + absl::nullopt, 1024, absl::nullopt, 1025, absl::nullopt, 1026}; + + for (size_t i = 0; i < optional32_values.size(); i++) { + batch_.push_back(new RtcTestEvent(0, 0, 0, 0, 0, optional32_values[i], + optional64_values[i], 0, "")); + } + + EventEncoder encoder(RtcTestEvent::event_params, batch_); + encoder.EncodeField( + RtcTestEvent::optional32_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed32_)); + encoder.EncodeField( + RtcTestEvent::optional64_params, + ExtractRtcEventMember(batch_, &RtcTestEvent::optional_signed64_)); + std::string s = encoder.AsString(); + + // Optional debug output + // PrintBytes(s); + + ParseEventHeader(s); + ParseAndVerifyTimestamps(); + ParseAndVerifyMissingOptionalField(RtcTestEvent::optional32_params); + ParseAndVerifyOptionalField(RtcTestEvent::optional64_params, + optional64_values, /*delta_bits=*/1); + EXPECT_EQ(parser_.RemainingBytes(), 0u); +} + +} // namespace webrtc diff --git a/logging/rtc_event_log/events/rtc_event_field_extraction.cc b/logging/rtc_event_log/events/rtc_event_field_extraction.cc new file mode 100644 index 0000000000..99f0b3697c --- /dev/null +++ b/logging/rtc_event_log/events/rtc_event_field_extraction.cc @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "logging/rtc_event_log/events/rtc_event_field_extraction.h" + +#include +#include + +#include "rtc_base/checks.h" + +namespace webrtc_event_logging { + +// The bitwidth required to encode values in the range +// [0, `max_pos_magnitude`] using an unsigned representation. +uint8_t UnsignedBitWidth(uint64_t max_magnitude) { + uint8_t required_bits = 1; + while (max_magnitude >>= 1) { + ++required_bits; + } + return required_bits; +} + +// The bitwidth required to encode signed values in the range +// [-`max_neg_magnitude`, `max_pos_magnitude`] using a signed +// 2-complement representation. +uint8_t SignedBitWidth(uint64_t max_pos_magnitude, uint64_t max_neg_magnitude) { + const uint8_t bitwidth_positive = + max_pos_magnitude > 0 ? UnsignedBitWidth(max_pos_magnitude) : 0; + const uint8_t bitwidth_negative = + (max_neg_magnitude > 1) ? UnsignedBitWidth(max_neg_magnitude - 1) : 0; + return 1 + std::max(bitwidth_positive, bitwidth_negative); +} + +// Return the maximum integer of a given bit width. +uint64_t MaxUnsignedValueOfBitWidth(uint64_t bit_width) { + RTC_DCHECK_GE(bit_width, 1); + RTC_DCHECK_LE(bit_width, 64); + return (bit_width == 64) ? std::numeric_limits::max() + : ((static_cast(1) << bit_width) - 1); +} + +// Computes the delta between `previous` and `current`, under the assumption +// that `bit_mask` is the largest value before wrap-around occurs. 
+// The bitmask must be of the form 2^x-1. (We use the wrap-around to more
+// efficiently compress counters that wrap around at different bit widths
+// than the backing C++ data type.)
+uint64_t UnsignedDelta(uint64_t previous, uint64_t current, uint64_t bit_mask) {
+  RTC_DCHECK_LE(previous, bit_mask);
+  RTC_DCHECK_LE(current, bit_mask);
+  return (current - previous) & bit_mask;
+}
+
+}  // namespace webrtc_event_logging
diff --git a/logging/rtc_event_log/events/rtc_event_field_extraction.h b/logging/rtc_event_log/events/rtc_event_field_extraction.h
new file mode 100644
index 0000000000..eb9d67f1c2
--- /dev/null
+++ b/logging/rtc_event_log/events/rtc_event_field_extraction.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_FIELD_EXTRACTION_H_
+#define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_FIELD_EXTRACTION_H_
+
+#include <stdint.h>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/rtc_event_log/rtc_event.h"
+#include "api/units/timestamp.h"
+#include "logging/rtc_event_log/encoder/rtc_event_log_encoder_common.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc_event_logging {
+uint8_t UnsignedBitWidth(uint64_t max_magnitude);
+uint8_t SignedBitWidth(uint64_t max_pos_magnitude, uint64_t max_neg_magnitude);
+uint64_t MaxUnsignedValueOfBitWidth(uint64_t bit_width);
+uint64_t UnsignedDelta(uint64_t previous, uint64_t current, uint64_t bit_mask);
+}  // namespace webrtc_event_logging
+
+namespace webrtc {
+template <typename T,
+          std::enable_if_t<std::is_signed<T>::value, bool> = true>
+uint64_t EncodeAsUnsigned(T value) {
+  return webrtc_event_logging::ToUnsigned(value);
+}
+
+template <typename T,
+          std::enable_if_t<std::is_unsigned<T>::value, bool> = true>
+uint64_t EncodeAsUnsigned(T value) {
+  return static_cast<uint64_t>(value);
+}
+
+template <typename T,
+          std::enable_if_t<std::is_signed<T>::value, bool> = true>
+T DecodeFromUnsignedToType(uint64_t value) {
+  T signed_value = 0;
+  bool success = webrtc_event_logging::ToSigned(value, &signed_value);
+  if (!success) {
+    RTC_LOG(LS_ERROR) << "Failed to convert " << value << " to signed type.";
+    // TODO(terelius): Propagate error?
+  }
+  return signed_value;
+}
+
+template <typename T,
+          std::enable_if_t<std::is_unsigned<T>::value, bool> = true>
+T DecodeFromUnsignedToType(uint64_t value) {
+  // TODO(terelius): Check range?
+  return static_cast<T>(value);
+}
+
+// RtcEventLogEnum<T> defines a mapping between an enum T
+// and the event log encodings. To log a new enum type T,
+// specialize RtcEventLogEnum<T> and add static methods
+// static uint64_t Encode(T x) {}
+// static RtcEventLogParseStatusOr<T> Decode(uint64_t x) {}
+template <typename T>
+class RtcEventLogEnum {
+  static_assert(sizeof(T) != sizeof(T),
+                "Missing specialisation of RtcEventLogEnum<T> for type");
+};
+
+// Represents a vector<absl::optional<uint64_t>> optional_values
+// as a bit-vector `position_mask` which identifies the positions
+// of existing values, and a (potentially shorter)
+// `vector<uint64_t> values` containing the actual values.
+// The bit vector is constructed such that position_mask[i]
+// is true iff optional_values[i] has a value, and `values.size()`
+// is equal to the number of set bits in `position_mask`.
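+// For example, optional_values = {nullopt, 7, nullopt, 9} is represented
+// as position_mask = {0, 1, 0, 1} and values = {7, 9}.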
+// Represents a `std::vector<absl::optional<uint64_t>> optional_values`
+// as a bit-vector `position_mask` which identifies the positions
+// of existing values, and a (potentially shorter)
+// `std::vector<uint64_t> values` containing the actual values.
+// The bit vector is constructed such that position_mask[i]
+// is true iff optional_values[i] has a value, and `values.size()`
+// is equal to the number of set bits in `position_mask`.
+struct ValuesWithPositions {
+  std::vector<bool> position_mask;
+  std::vector<uint64_t> values;
+};
+
+}  // namespace webrtc
+
+#endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_FIELD_EXTRACTION_H_
diff --git a/logging/rtc_event_log/events/rtc_event_field_extraction_unittest.cc b/logging/rtc_event_log/events/rtc_event_field_extraction_unittest.cc
new file mode 100644
index 0000000000..f9fb993af0
--- /dev/null
+++ b/logging/rtc_event_log/events/rtc_event_field_extraction_unittest.cc
@@ -0,0 +1,97 @@
+/* Copyright (c) 2021 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "logging/rtc_event_log/events/rtc_event_field_extraction.h"
+
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(UnsignedBitWidthTest, SmallValues) {
+  EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(0), 1u);
+  EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(1), 1u);
+  EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(2), 2u);
+  EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(3), 2u);
+  EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(4), 3u);
+  EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(5), 3u);
+  EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(6), 3u);
+  EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(7), 3u);
+}
+
+TEST(UnsignedBitWidthTest, PowersOfTwo) {
+  EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(0), 1u);
+
+  for (unsigned i = 0; i < 64; i++) {
+    uint64_t x = 1;
+    x = x << i;
+    EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(x), i + 1);
+  }
+}
+
+TEST(UnsignedBitWidthTest, PowersOfTwoMinusOne) {
+  for (unsigned i = 1; i < 64; i++) {
+    uint64_t x = 1;
+    x = (x << i) - 1;
+    EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(x), i);
+  }
+
+  uint64_t x = ~static_cast<uint64_t>(0);
+  EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(x), 64u);
+}
+
+TEST(UnsignedBitWidthTest, RandomInputs) {
+  Random rand(12345);
+
+  for (unsigned i = 0; i < 64; i++) {
+    uint64_t x = 1;
+    x = x << i;
+    uint64_t high = rand.Rand<uint32_t>();
+    uint64_t low = rand.Rand<uint32_t>();
+    x += ((high << 32) + low) % x;
+    EXPECT_EQ(webrtc_event_logging::UnsignedBitWidth(x), i + 1);
+  }
+}
+
+TEST(SignedBitWidthTest, SignedBitWidth) {
+  EXPECT_EQ(webrtc_event_logging::SignedBitWidth(0, 1), 1u);
+  EXPECT_EQ(webrtc_event_logging::SignedBitWidth(1, 0), 2u);
+  EXPECT_EQ(webrtc_event_logging::SignedBitWidth(1, 2), 2u);
+  EXPECT_EQ(webrtc_event_logging::SignedBitWidth(1, 128), 8u);
+  EXPECT_EQ(webrtc_event_logging::SignedBitWidth(127, 1), 8u);
+  EXPECT_EQ(webrtc_event_logging::SignedBitWidth(127, 128), 8u);
+  EXPECT_EQ(webrtc_event_logging::SignedBitWidth(1, 129), 9u);
+  EXPECT_EQ(webrtc_event_logging::SignedBitWidth(128, 1), 9u);
+}
+
+TEST(MaxUnsignedValueOfBitWidthTest, MaxUnsignedValueOfBitWidth) {
+  EXPECT_EQ(webrtc_event_logging::MaxUnsignedValueOfBitWidth(1), 0x01u);
+  EXPECT_EQ(webrtc_event_logging::MaxUnsignedValueOfBitWidth(6), 0x3Fu);
+  EXPECT_EQ(webrtc_event_logging::MaxUnsignedValueOfBitWidth(8), 0xFFu);
+  EXPECT_EQ(webrtc_event_logging::MaxUnsignedValueOfBitWidth(32), 0xFFFFFFFFu);
+}
+
+TEST(EncodeAsUnsignedTest, NegativeValues) {
+  // Negative values are converted as if cast to an unsigned type of
+  // the same bit size, using two's complement representation.
+  int16_t x = -1;
+  EXPECT_EQ(EncodeAsUnsigned(x), static_cast<uint64_t>(0xFFFF));
+  int64_t y = -1;
+  EXPECT_EQ(EncodeAsUnsigned(y), static_cast<uint64_t>(0xFFFFFFFFFFFFFFFFull));
+}
+
+TEST(EncodeAsUnsignedTest, PositiveValues) {
+  // Positive values are unchanged.
+  int16_t x = 42;
+  EXPECT_EQ(EncodeAsUnsigned(x), static_cast<uint64_t>(42));
+  int64_t y = 42;
+  EXPECT_EQ(EncodeAsUnsigned(y), static_cast<uint64_t>(42));
+}
+
+}  // namespace webrtc
diff --git a/logging/rtc_event_log/events/rtc_event_frame_decoded.h b/logging/rtc_event_log/events/rtc_event_frame_decoded.h
index 4a6bb90d02..db5162cd0d 100644
--- a/logging/rtc_event_log/events/rtc_event_frame_decoded.h
+++ b/logging/rtc_event_log/events/rtc_event_frame_decoded.h
@@ -13,14 +13,32 @@
 
 #include <stdint.h>
+#include <map>
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
 #include "api/video/video_codec_type.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
+struct LoggedFrameDecoded {
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  int64_t render_time_ms;
+  uint32_t ssrc;
+  int width;
+  int height;
+  VideoCodecType codec;
+  uint8_t qp;
+};
+
 class RtcEventFrameDecoded final : public RtcEvent {
  public:
   static constexpr Type kType = Type::FrameDecoded;
@@ -45,6 +63,19 @@ class RtcEventFrameDecoded final : public RtcEvent {
   VideoCodecType codec() const { return codec_; }
   uint8_t qp() const { return qp_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::map<uint32_t, std::vector<LoggedFrameDecoded>>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventFrameDecoded(const RtcEventFrameDecoded& other);
 
@@ -56,19 +87,6 @@ class RtcEventFrameDecoded final : public RtcEvent {
   const uint8_t qp_;
 };
 
-struct LoggedFrameDecoded {
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  int64_t render_time_ms;
-  uint32_t ssrc;
-  int width;
-  int height;
-  VideoCodecType codec;
-  uint8_t qp;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_FRAME_DECODED_H_
diff --git a/logging/rtc_event_log/events/rtc_event_generic_ack_received.h b/logging/rtc_event_log/events/rtc_event_generic_ack_received.h
index 75fc83c8b8..d1ad0f1fef 100644
--- a/logging/rtc_event_log/events/rtc_event_generic_ack_received.h
+++ b/logging/rtc_event_log/events/rtc_event_generic_ack_received.h
@@ -12,14 +12,37 @@
 #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_GENERIC_ACK_RECEIVED_H_
 
 #include <stdint.h>
+#include <string>
 #include <vector>
 
+#include "absl/strings/string_view.h"
 #include "absl/types/optional.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
+struct LoggedGenericAckReceived {
+  LoggedGenericAckReceived() = default;
+  LoggedGenericAckReceived(Timestamp timestamp,
+                           int64_t packet_number,
+                           int64_t acked_packet_number,
+                           absl::optional<int64_t> receive_acked_packet_time_ms)
+      : timestamp(timestamp),
+        packet_number(packet_number),
+        acked_packet_number(acked_packet_number),
+        receive_acked_packet_time_ms(receive_acked_packet_time_ms) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  int64_t packet_number;
+  int64_t acked_packet_number;
+  absl::optional<int64_t> receive_acked_packet_time_ms;
+};
+
 struct AckedPacket {
   // The packet number that was acked.
   int64_t packet_number;
@@ -52,18 +75,31 @@ class RtcEventGenericAckReceived final : public RtcEvent {
   // An identifier of the acked packet.
   int64_t acked_packet_number() const { return acked_packet_number_; }
 
-  // Timestamp when the |acked_packet_number| was received by the remote side.
+  // Timestamp when the `acked_packet_number` was received by the remote side.
   absl::optional<int64_t> receive_acked_packet_time_ms() const {
     return receive_acked_packet_time_ms_;
   }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedGenericAckReceived>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventGenericAckReceived(const RtcEventGenericAckReceived& packet);
 
-  // When the ack is received, |packet_number| identifies the packet which
-  // contained an ack for |acked_packet_number|, and contains the
-  // |receive_acked_packet_time_ms| on which the |acked_packet_number| was
-  // received on the remote side. The |receive_acked_packet_time_ms| may be
+  // When the ack is received, `packet_number` identifies the packet which
+  // contained an ack for `acked_packet_number`, and contains the
+  // `receive_acked_packet_time_ms` on which the `acked_packet_number` was
+  // received on the remote side. The `receive_acked_packet_time_ms` may be
   // null.
   RtcEventGenericAckReceived(
       int64_t timestamp_us,
@@ -76,26 +112,6 @@ class RtcEventGenericAckReceived final : public RtcEvent {
   const absl::optional<int64_t> receive_acked_packet_time_ms_;
 };
 
-struct LoggedGenericAckReceived {
-  LoggedGenericAckReceived() = default;
-  LoggedGenericAckReceived(Timestamp timestamp,
-                           int64_t packet_number,
-                           int64_t acked_packet_number,
-                           absl::optional<int64_t> receive_acked_packet_time_ms)
-      : timestamp(timestamp),
-        packet_number(packet_number),
-        acked_packet_number(acked_packet_number),
-        receive_acked_packet_time_ms(receive_acked_packet_time_ms) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  int64_t packet_number;
-  int64_t acked_packet_number;
-  absl::optional<int64_t> receive_acked_packet_time_ms;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_GENERIC_ACK_RECEIVED_H_
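For orientation, here is a rough sketch (not part of the patch) of how one of these "Not Implemented" Encode stubs might eventually be filled in, following the EventEncoder pattern from the field-encoding unit test earlier in this diff. The `event_params` object and the per-field parameters (`packet_number_params` and friends) are assumptions by analogy with RtcTestEvent; they are not defined in this patch.

// Hypothetical sketch only; names marked *_params are assumed, not real.
std::string RtcEventGenericAckReceived::Encode(
    rtc::ArrayView<const RtcEvent*> batch) {
  // Mirrors the unit-test usage: one EventEncoder per batch, one
  // EncodeField call per logged member.
  EventEncoder encoder(event_params, batch);
  encoder.EncodeField(
      packet_number_params,
      ExtractRtcEventMember(batch,
                            &RtcEventGenericAckReceived::packet_number_));
  encoder.EncodeField(
      acked_packet_number_params,
      ExtractRtcEventMember(batch,
                            &RtcEventGenericAckReceived::acked_packet_number_));
  encoder.EncodeField(
      receive_time_params,
      ExtractRtcEventMember(
          batch, &RtcEventGenericAckReceived::receive_acked_packet_time_ms_));
  return encoder.AsString();
}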
diff --git a/logging/rtc_event_log/events/rtc_event_generic_packet_received.h b/logging/rtc_event_log/events/rtc_event_generic_packet_received.h
index 428e7b3806..fddee67740 100644
--- a/logging/rtc_event_log/events/rtc_event_generic_packet_received.h
+++ b/logging/rtc_event_log/events/rtc_event_generic_packet_received.h
@@ -12,12 +12,33 @@
 #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_GENERIC_PACKET_RECEIVED_H_
 
 #include <stdint.h>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
+struct LoggedGenericPacketReceived {
+  LoggedGenericPacketReceived() = default;
+  LoggedGenericPacketReceived(Timestamp timestamp,
+                              int64_t packet_number,
+                              int packet_length)
+      : timestamp(timestamp),
+        packet_number(packet_number),
+        packet_length(packet_length) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  int64_t packet_number;
+  int packet_length;
+};
+
 class RtcEventGenericPacketReceived final : public RtcEvent {
  public:
   static constexpr Type kType = Type::GenericPacketReceived;
@@ -37,6 +58,19 @@ class RtcEventGenericPacketReceived final : public RtcEvent {
   // including ICE/TURN/IP overheads.
   size_t packet_length() const { return packet_length_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedGenericPacketReceived>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventGenericPacketReceived(const RtcEventGenericPacketReceived& packet);
 
@@ -44,23 +78,6 @@ class RtcEventGenericPacketReceived final : public RtcEvent {
   const size_t packet_length_;
 };
 
-struct LoggedGenericPacketReceived {
-  LoggedGenericPacketReceived() = default;
-  LoggedGenericPacketReceived(Timestamp timestamp,
-                              int64_t packet_number,
-                              int packet_length)
-      : timestamp(timestamp),
-        packet_number(packet_number),
-        packet_length(packet_length) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  int64_t packet_number;
-  int packet_length;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_GENERIC_PACKET_RECEIVED_H_
diff --git a/logging/rtc_event_log/events/rtc_event_generic_packet_sent.h b/logging/rtc_event_log/events/rtc_event_generic_packet_sent.h
index 6e626e63a1..0f839ca949 100644
--- a/logging/rtc_event_log/events/rtc_event_generic_packet_sent.h
+++ b/logging/rtc_event_log/events/rtc_event_generic_packet_sent.h
@@ -12,12 +12,42 @@
 #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_GENERIC_PACKET_SENT_H_
 
 #include <stddef.h>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
+struct LoggedGenericPacketSent {
+  LoggedGenericPacketSent() = default;
+  LoggedGenericPacketSent(Timestamp timestamp,
+                          int64_t packet_number,
+                          size_t overhead_length,
+                          size_t payload_length,
+                          size_t padding_length)
+      : timestamp(timestamp),
+        packet_number(packet_number),
+        overhead_length(overhead_length),
+        payload_length(payload_length),
+        padding_length(padding_length) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  size_t packet_length() const {
+    return payload_length + padding_length + overhead_length;
+  }
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  int64_t packet_number;
+  size_t overhead_length;
+  size_t payload_length;
+  size_t padding_length;
+};
+
 class RtcEventGenericPacketSent final : public RtcEvent {
  public:
   static constexpr Type kType = Type::GenericPacketSent;
@@ -52,6 +82,19 @@ class RtcEventGenericPacketSent final : public RtcEvent {
 
   size_t padding_length() const { return padding_length_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedGenericPacketSent>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventGenericPacketSent(const RtcEventGenericPacketSent& packet);
 
@@ -61,31 +104,6 @@ class RtcEventGenericPacketSent final : public RtcEvent {
   const size_t padding_length_;
 };
 
-struct LoggedGenericPacketSent {
-  LoggedGenericPacketSent() = default;
-  LoggedGenericPacketSent(Timestamp timestamp,
-                          int64_t packet_number,
-                          size_t overhead_length,
-                          size_t payload_length,
-                          size_t padding_length)
-      : timestamp(timestamp),
-        packet_number(packet_number),
-        overhead_length(overhead_length),
-        payload_length(payload_length),
-        padding_length(padding_length) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  size_t packet_length() const {
-    return payload_length + padding_length + overhead_length;
-  }
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  int64_t packet_number;
-  size_t overhead_length;
-  size_t payload_length;
-  size_t padding_length;
-};
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_GENERIC_PACKET_SENT_H_
diff --git a/logging/rtc_event_log/events/rtc_event_ice_candidate_pair.h b/logging/rtc_event_log/events/rtc_event_ice_candidate_pair.h
index 1f4d825a99..85cf79735a 100644
--- a/logging/rtc_event_log/events/rtc_event_ice_candidate_pair.h
+++ b/logging/rtc_event_log/events/rtc_event_ice_candidate_pair.h
@@ -14,9 +14,13 @@
 
 #include <stdint.h>
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
@@ -28,6 +32,26 @@ enum class IceCandidatePairEventType {
   kNumValues,
 };
 
+struct LoggedIceCandidatePairEvent {
+  LoggedIceCandidatePairEvent() = default;
+  LoggedIceCandidatePairEvent(Timestamp timestamp,
+                              IceCandidatePairEventType type,
+                              uint32_t candidate_pair_id,
+                              uint32_t transaction_id)
+      : timestamp(timestamp),
+        type(type),
+        candidate_pair_id(candidate_pair_id),
+        transaction_id(transaction_id) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  IceCandidatePairEventType type;
+  uint32_t candidate_pair_id;
+  uint32_t transaction_id;
+};
+
 class RtcEventIceCandidatePair final : public RtcEvent {
  public:
   static constexpr Type kType = Type::IceCandidatePairEvent;
@@ -47,6 +71,19 @@ class RtcEventIceCandidatePair final : public RtcEvent {
   uint32_t candidate_pair_id() const { return candidate_pair_id_; }
   uint32_t transaction_id() const { return transaction_id_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedIceCandidatePairEvent>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventIceCandidatePair(const RtcEventIceCandidatePair& other);
 
@@ -55,26 +92,6 @@ class RtcEventIceCandidatePair final : public RtcEvent {
   const uint32_t transaction_id_;
 };
 
-struct LoggedIceCandidatePairEvent {
-  LoggedIceCandidatePairEvent() = default;
-  LoggedIceCandidatePairEvent(Timestamp timestamp,
-                              IceCandidatePairEventType type,
-                              uint32_t candidate_pair_id,
-                              uint32_t transaction_id)
-      : timestamp(timestamp),
-        type(type),
-        candidate_pair_id(candidate_pair_id),
-        transaction_id(transaction_id) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  IceCandidatePairEventType type;
-  uint32_t candidate_pair_id;
-  uint32_t transaction_id;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_ICE_CANDIDATE_PAIR_H_
diff --git a/logging/rtc_event_log/events/rtc_event_ice_candidate_pair_config.h b/logging/rtc_event_log/events/rtc_event_ice_candidate_pair_config.h
index 465a799780..0dfbfc838a 100644
--- a/logging/rtc_event_log/events/rtc_event_ice_candidate_pair_config.h
+++ b/logging/rtc_event_log/events/rtc_event_ice_candidate_pair_config.h
@@ -14,9 +14,13 @@
 
 #include <stdint.h>
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
@@ -65,6 +69,22 @@ enum class IceCandidateNetworkType {
   kNumValues,
 };
 
+struct LoggedIceCandidatePairConfig {
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  IceCandidatePairConfigType type;
+  uint32_t candidate_pair_id;
+  IceCandidateType local_candidate_type;
+  IceCandidatePairProtocol local_relay_protocol;
+  IceCandidateNetworkType local_network_type;
+  IceCandidatePairAddressFamily local_address_family;
+  IceCandidateType remote_candidate_type;
+  IceCandidatePairAddressFamily remote_address_family;
+  IceCandidatePairProtocol candidate_pair_protocol;
+};
+
 class IceCandidatePairDescription {
  public:
  IceCandidatePairDescription();
@@ -105,6 +125,19 @@ class RtcEventIceCandidatePairConfig final : public RtcEvent {
     return candidate_pair_desc_;
   }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedIceCandidatePairConfig>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventIceCandidatePairConfig(const RtcEventIceCandidatePairConfig& other);
 
@@ -113,22 +146,6 @@ class RtcEventIceCandidatePairConfig final : public RtcEvent {
   const IceCandidatePairDescription candidate_pair_desc_;
 };
 
-struct LoggedIceCandidatePairConfig {
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  IceCandidatePairConfigType type;
-  uint32_t candidate_pair_id;
-  IceCandidateType local_candidate_type;
-  IceCandidatePairProtocol local_relay_protocol;
-  IceCandidateNetworkType local_network_type;
-  IceCandidatePairAddressFamily local_address_family;
-  IceCandidateType remote_candidate_type;
-  IceCandidatePairAddressFamily remote_address_family;
-  IceCandidatePairProtocol candidate_pair_protocol;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_ICE_CANDIDATE_PAIR_CONFIG_H_
diff --git a/logging/rtc_event_log/events/rtc_event_probe_cluster_created.h b/logging/rtc_event_log/events/rtc_event_probe_cluster_created.h
index 974a0c9a5c..b91d89c239 100644
--- a/logging/rtc_event_log/events/rtc_event_probe_cluster_created.h
+++ b/logging/rtc_event_log/events/rtc_event_probe_cluster_created.h
@@ -14,12 +14,39 @@
 
 #include <stdint.h>
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
+struct LoggedBweProbeClusterCreatedEvent {
+  LoggedBweProbeClusterCreatedEvent() = default;
+  LoggedBweProbeClusterCreatedEvent(Timestamp timestamp,
+                                    int32_t id,
+                                    int32_t bitrate_bps,
+                                    uint32_t min_packets,
+                                    uint32_t min_bytes)
+      : timestamp(timestamp),
+        id(id),
+        bitrate_bps(bitrate_bps),
+        min_packets(min_packets),
+        min_bytes(min_bytes) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  int32_t id;
+  int32_t bitrate_bps;
+  uint32_t min_packets;
+  uint32_t min_bytes;
+};
+
 class RtcEventProbeClusterCreated final : public RtcEvent {
  public:
   static constexpr Type kType = Type::ProbeClusterCreated;
@@ -40,6 +67,19 @@ class RtcEventProbeClusterCreated final : public RtcEvent {
   uint32_t min_probes() const { return min_probes_; }
   uint32_t min_bytes() const { return min_bytes_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedBweProbeClusterCreatedEvent>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventProbeClusterCreated(const RtcEventProbeClusterCreated& other);
 
@@ -49,29 +89,6 @@ class RtcEventProbeClusterCreated final : public RtcEvent {
   const uint32_t min_bytes_;
 };
 
-struct LoggedBweProbeClusterCreatedEvent {
-  LoggedBweProbeClusterCreatedEvent() = default;
-  LoggedBweProbeClusterCreatedEvent(Timestamp timestamp,
-                                    int32_t id,
-                                    int32_t bitrate_bps,
-                                    uint32_t min_packets,
-                                    uint32_t min_bytes)
-      : timestamp(timestamp),
-        id(id),
-        bitrate_bps(bitrate_bps),
-        min_packets(min_packets),
-        min_bytes(min_bytes) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  int32_t id;
-  int32_t bitrate_bps;
-  uint32_t min_packets;
-  uint32_t min_bytes;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_PROBE_CLUSTER_CREATED_H_
diff --git a/logging/rtc_event_log/events/rtc_event_probe_result_failure.h b/logging/rtc_event_log/events/rtc_event_probe_result_failure.h
index fa61b314b4..ba4db75c66 100644
--- a/logging/rtc_event_log/events/rtc_event_probe_result_failure.h
+++ b/logging/rtc_event_log/events/rtc_event_probe_result_failure.h
@@ -14,9 +14,13 @@
 
 #include <stdint.h>
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
@@ -27,6 +31,21 @@ enum class ProbeFailureReason {
   kLast
 };
 
+struct LoggedBweProbeFailureEvent {
+  LoggedBweProbeFailureEvent() = default;
+  LoggedBweProbeFailureEvent(Timestamp timestamp,
+                             int32_t id,
+                             ProbeFailureReason failure_reason)
+      : timestamp(timestamp), id(id), failure_reason(failure_reason) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  int32_t id;
+  ProbeFailureReason failure_reason;
+};
+
 class RtcEventProbeResultFailure final : public RtcEvent {
  public:
   static constexpr Type kType = Type::ProbeResultFailure;
@@ -42,6 +61,19 @@ class RtcEventProbeResultFailure final : public RtcEvent {
   int32_t id() const { return id_; }
   ProbeFailureReason failure_reason() const { return failure_reason_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedBweProbeFailureEvent>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
Implemented", __FILE__, __LINE__); + } + private: RtcEventProbeResultFailure(const RtcEventProbeResultFailure& other); @@ -49,21 +81,6 @@ class RtcEventProbeResultFailure final : public RtcEvent { const ProbeFailureReason failure_reason_; }; -struct LoggedBweProbeFailureEvent { - LoggedBweProbeFailureEvent() = default; - LoggedBweProbeFailureEvent(Timestamp timestamp, - int32_t id, - ProbeFailureReason failure_reason) - : timestamp(timestamp), id(id), failure_reason(failure_reason) {} - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp timestamp = Timestamp::MinusInfinity(); - int32_t id; - ProbeFailureReason failure_reason; -}; - } // namespace webrtc #endif // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_PROBE_RESULT_FAILURE_H_ diff --git a/logging/rtc_event_log/events/rtc_event_probe_result_success.h b/logging/rtc_event_log/events/rtc_event_probe_result_success.h index d00cfa81d6..172e9aa2eb 100644 --- a/logging/rtc_event_log/events/rtc_event_probe_result_success.h +++ b/logging/rtc_event_log/events/rtc_event_probe_result_success.h @@ -14,12 +14,31 @@ #include #include +#include +#include +#include "absl/strings/string_view.h" #include "api/rtc_event_log/rtc_event.h" #include "api/units/timestamp.h" +#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h" namespace webrtc { +struct LoggedBweProbeSuccessEvent { + LoggedBweProbeSuccessEvent() = default; + LoggedBweProbeSuccessEvent(Timestamp timestamp, + int32_t id, + int32_t bitrate_bps) + : timestamp(timestamp), id(id), bitrate_bps(bitrate_bps) {} + + int64_t log_time_us() const { return timestamp.us(); } + int64_t log_time_ms() const { return timestamp.ms(); } + + Timestamp timestamp = Timestamp::MinusInfinity(); + int32_t id; + int32_t bitrate_bps; +}; + class RtcEventProbeResultSuccess final : public RtcEvent { public: static constexpr Type kType = Type::ProbeResultSuccess; @@ -35,6 +54,19 @@ class RtcEventProbeResultSuccess final : public RtcEvent { int32_t id() const { return id_; } int32_t bitrate_bps() const { return bitrate_bps_; } + static std::string Encode(rtc::ArrayView batch) { + // TODO(terelius): Implement + return ""; + } + + static RtcEventLogParseStatus Parse( + absl::string_view encoded_bytes, + bool batched, + std::vector& output) { + // TODO(terelius): Implement + return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__); + } + private: RtcEventProbeResultSuccess(const RtcEventProbeResultSuccess& other); @@ -42,21 +74,6 @@ class RtcEventProbeResultSuccess final : public RtcEvent { const int32_t bitrate_bps_; }; -struct LoggedBweProbeSuccessEvent { - LoggedBweProbeSuccessEvent() = default; - LoggedBweProbeSuccessEvent(Timestamp timestamp, - int32_t id, - int32_t bitrate_bps) - : timestamp(timestamp), id(id), bitrate_bps(bitrate_bps) {} - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp timestamp = Timestamp::MinusInfinity(); - int32_t id; - int32_t bitrate_bps; -}; - } // namespace webrtc #endif // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_PROBE_RESULT_SUCCESS_H_ diff --git a/logging/rtc_event_log/events/rtc_event_remote_estimate.h b/logging/rtc_event_log/events/rtc_event_remote_estimate.h index 956e05f682..17de63c475 100644 --- a/logging/rtc_event_log/events/rtc_event_remote_estimate.h +++ b/logging/rtc_event_log/events/rtc_event_remote_estimate.h @@ -11,14 +11,29 @@ #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_REMOTE_ESTIMATE_H_ 
 
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "absl/types/optional.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/data_rate.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
+struct LoggedRemoteEstimateEvent {
+  LoggedRemoteEstimateEvent() = default;
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  absl::optional<DataRate> link_capacity_lower;
+  absl::optional<DataRate> link_capacity_upper;
+};
+
 class RtcEventRemoteEstimate final : public RtcEvent {
  public:
   static constexpr Type kType = Type::RemoteEstimateEvent;
@@ -31,19 +46,22 @@ class RtcEventRemoteEstimate final : public RtcEvent {
   Type GetType() const override { return kType; }
   bool IsConfigEvent() const override { return false; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedRemoteEstimateEvent>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
   const DataRate link_capacity_lower_;
   const DataRate link_capacity_upper_;
 };
 
-struct LoggedRemoteEstimateEvent {
-  LoggedRemoteEstimateEvent() = default;
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  absl::optional<DataRate> link_capacity_lower;
-  absl::optional<DataRate> link_capacity_upper;
-};
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_REMOTE_ESTIMATE_H_
diff --git a/logging/rtc_event_log/events/rtc_event_route_change.h b/logging/rtc_event_log/events/rtc_event_route_change.h
index 4a4e9aef80..542d15e3b3 100644
--- a/logging/rtc_event_log/events/rtc_event_route_change.h
+++ b/logging/rtc_event_log/events/rtc_event_route_change.h
@@ -12,12 +12,29 @@
 #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_ROUTE_CHANGE_H_
 
 #include <stdint.h>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 
 namespace webrtc {
 
+struct LoggedRouteChangeEvent {
+  LoggedRouteChangeEvent() = default;
+  LoggedRouteChangeEvent(Timestamp timestamp, bool connected, uint32_t overhead)
+      : timestamp(timestamp), connected(connected), overhead(overhead) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  bool connected;
+  uint32_t overhead;
+};
+
 class RtcEventRouteChange final : public RtcEvent {
  public:
   static constexpr Type kType = Type::RouteChangeEvent;
@@ -33,6 +50,19 @@ class RtcEventRouteChange final : public RtcEvent {
   bool connected() const { return connected_; }
   uint32_t overhead() const { return overhead_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedRouteChangeEvent>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventRouteChange(const RtcEventRouteChange& other);
 
@@ -40,18 +70,5 @@ class RtcEventRouteChange final : public RtcEvent {
   const uint32_t overhead_;
 };
 
-struct LoggedRouteChangeEvent {
-  LoggedRouteChangeEvent() = default;
-  LoggedRouteChangeEvent(Timestamp timestamp, bool connected, uint32_t overhead)
-      : timestamp(timestamp), connected(connected), overhead(overhead) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  bool connected;
-  uint32_t overhead;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_ROUTE_CHANGE_H_
diff --git a/logging/rtc_event_log/events/rtc_event_rtcp_packet_incoming.h b/logging/rtc_event_log/events/rtc_event_rtcp_packet_incoming.h
index 1cbac7712f..84fe398e08 100644
--- a/logging/rtc_event_log/events/rtc_event_rtcp_packet_incoming.h
+++ b/logging/rtc_event_log/events/rtc_event_rtcp_packet_incoming.h
@@ -14,9 +14,14 @@
 
 #include <stdint.h>
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/array_view.h"
 #include "api/rtc_event_log/rtc_event.h"
+#include "logging/rtc_event_log/events/logged_rtp_rtcp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 #include "rtc_base/buffer.h"
 
 namespace webrtc {
@@ -35,6 +40,19 @@ class RtcEventRtcpPacketIncoming final : public RtcEvent {
 
   const rtc::Buffer& packet() const { return packet_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedRtcpPacketIncoming>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventRtcpPacketIncoming(const RtcEventRtcpPacketIncoming& other);
diff --git a/logging/rtc_event_log/events/rtc_event_rtcp_packet_outgoing.h b/logging/rtc_event_log/events/rtc_event_rtcp_packet_outgoing.h
index 0ecccbeaae..687bd319b4 100644
--- a/logging/rtc_event_log/events/rtc_event_rtcp_packet_outgoing.h
+++ b/logging/rtc_event_log/events/rtc_event_rtcp_packet_outgoing.h
@@ -14,9 +14,14 @@
 
 #include <stdint.h>
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/array_view.h"
 #include "api/rtc_event_log/rtc_event.h"
+#include "logging/rtc_event_log/events/logged_rtp_rtcp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 #include "rtc_base/buffer.h"
 
 namespace webrtc {
@@ -35,6 +40,19 @@ class RtcEventRtcpPacketOutgoing final : public RtcEvent {
 
   const rtc::Buffer& packet() const { return packet_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedRtcpPacketOutgoing>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventRtcpPacketOutgoing(const RtcEventRtcpPacketOutgoing& other);
diff --git a/logging/rtc_event_log/events/rtc_event_rtp_packet_incoming.h b/logging/rtc_event_log/events/rtc_event_rtp_packet_incoming.h
index ee48fa360b..926ddddff5 100644
--- a/logging/rtc_event_log/events/rtc_event_rtp_packet_incoming.h
+++ b/logging/rtc_event_log/events/rtc_event_rtp_packet_incoming.h
@@ -13,11 +13,17 @@
 
 #include <stddef.h>
 #include <stdint.h>
+#include <map>
 #include <memory>
+#include <string>
 #include <utility>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/array_view.h"
 #include "api/rtc_event_log/rtc_event.h"
+#include "logging/rtc_event_log/events/logged_rtp_rtcp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
"logging/rtc_event_log/events/rtc_event_field_encoding_parser.h" #include "modules/rtp_rtcp/source/rtp_packet.h" namespace webrtc { @@ -59,6 +65,19 @@ class RtcEventRtpPacketIncoming final : public RtcEvent { size_t header_length() const { return packet_.headers_size(); } size_t padding_length() const { return packet_.padding_size(); } + static std::string Encode(rtc::ArrayView batch) { + // TODO(terelius): Implement + return ""; + } + + static RtcEventLogParseStatus Parse( + absl::string_view encoded_bytes, + bool batched, + std::map>& output) { + // TODO(terelius): Implement + return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__); + } + private: RtcEventRtpPacketIncoming(const RtcEventRtpPacketIncoming& other); diff --git a/logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h b/logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h index 9ef5b1afdd..c7b7a09718 100644 --- a/logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h +++ b/logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h @@ -13,11 +13,17 @@ #include #include +#include #include +#include #include +#include +#include "absl/strings/string_view.h" #include "api/array_view.h" #include "api/rtc_event_log/rtc_event.h" +#include "logging/rtc_event_log/events/logged_rtp_rtcp.h" +#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h" #include "modules/rtp_rtcp/source/rtp_packet.h" namespace webrtc { @@ -61,11 +67,24 @@ class RtcEventRtpPacketOutgoing final : public RtcEvent { size_t padding_length() const { return packet_.padding_size(); } int probe_cluster_id() const { return probe_cluster_id_; } + static std::string Encode(rtc::ArrayView batch) { + // TODO(terelius): Implement + return ""; + } + + static RtcEventLogParseStatus Parse( + absl::string_view encoded_bytes, + bool batched, + std::map>& output) { + // TODO(terelius): Implement + return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__); + } + private: RtcEventRtpPacketOutgoing(const RtcEventRtpPacketOutgoing& other); const RtpPacket packet_; - // TODO(eladalon): Delete |probe_cluster_id_| along with legacy encoding. + // TODO(eladalon): Delete `probe_cluster_id_` along with legacy encoding. 
   const int probe_cluster_id_;
 };
diff --git a/logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h b/logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h
index e7b9061872..f3380d1bb6 100644
--- a/logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h
+++ b/logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h
@@ -12,13 +12,29 @@
 #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_VIDEO_RECEIVE_STREAM_CONFIG_H_
 
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 #include "logging/rtc_event_log/rtc_stream_config.h"
 
 namespace webrtc {
 
+struct LoggedVideoRecvConfig {
+  LoggedVideoRecvConfig() = default;
+  LoggedVideoRecvConfig(Timestamp timestamp, const rtclog::StreamConfig config)
+      : timestamp(timestamp), config(config) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  rtclog::StreamConfig config;
+};
+
 class RtcEventVideoReceiveStreamConfig final : public RtcEvent {
  public:
   static constexpr Type kType = Type::VideoReceiveStreamConfig;
@@ -34,6 +50,19 @@ class RtcEventVideoReceiveStreamConfig final : public RtcEvent {
 
   const rtclog::StreamConfig& config() const { return *config_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedVideoRecvConfig>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventVideoReceiveStreamConfig(
       const RtcEventVideoReceiveStreamConfig& other);
 
@@ -41,18 +70,6 @@ class RtcEventVideoReceiveStreamConfig final : public RtcEvent {
   const std::unique_ptr<rtclog::StreamConfig> config_;
 };
 
-struct LoggedVideoRecvConfig {
-  LoggedVideoRecvConfig() = default;
-  LoggedVideoRecvConfig(Timestamp timestamp, const rtclog::StreamConfig config)
-      : timestamp(timestamp), config(config) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  rtclog::StreamConfig config;
-};
-
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_VIDEO_RECEIVE_STREAM_CONFIG_H_
diff --git a/logging/rtc_event_log/events/rtc_event_video_send_stream_config.h b/logging/rtc_event_log/events/rtc_event_video_send_stream_config.h
index e72e75e49d..15c28999dc 100644
--- a/logging/rtc_event_log/events/rtc_event_video_send_stream_config.h
+++ b/logging/rtc_event_log/events/rtc_event_video_send_stream_config.h
@@ -12,13 +12,29 @@
 #define LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_VIDEO_SEND_STREAM_CONFIG_H_
 
 #include <memory>
+#include <string>
+#include <vector>
 
+#include "absl/strings/string_view.h"
 #include "api/rtc_event_log/rtc_event.h"
 #include "api/units/timestamp.h"
+#include "logging/rtc_event_log/events/rtc_event_field_encoding_parser.h"
 #include "logging/rtc_event_log/rtc_stream_config.h"
 
 namespace webrtc {
 
+struct LoggedVideoSendConfig {
+  LoggedVideoSendConfig() = default;
+  LoggedVideoSendConfig(Timestamp timestamp, const rtclog::StreamConfig config)
+      : timestamp(timestamp), config(config) {}
+
+  int64_t log_time_us() const { return timestamp.us(); }
+  int64_t log_time_ms() const { return timestamp.ms(); }
+
+  Timestamp timestamp = Timestamp::MinusInfinity();
+  rtclog::StreamConfig config;
+};
+
 class RtcEventVideoSendStreamConfig final : public RtcEvent {
  public:
   static constexpr Type kType = Type::VideoSendStreamConfig;
@@ -34,23 +50,25 @@ class RtcEventVideoSendStreamConfig final : public RtcEvent {
 
   const rtclog::StreamConfig& config() const { return *config_; }
 
+  static std::string Encode(rtc::ArrayView<const RtcEvent*> batch) {
+    // TODO(terelius): Implement
+    return "";
+  }
+
+  static RtcEventLogParseStatus Parse(
+      absl::string_view encoded_bytes,
+      bool batched,
+      std::vector<LoggedVideoSendConfig>& output) {
+    // TODO(terelius): Implement
+    return RtcEventLogParseStatus::Error("Not Implemented", __FILE__, __LINE__);
+  }
+
  private:
   RtcEventVideoSendStreamConfig(const RtcEventVideoSendStreamConfig& other);
 
   const std::unique_ptr<rtclog::StreamConfig> config_;
 };
 
-struct LoggedVideoSendConfig {
-  LoggedVideoSendConfig() = default;
-  LoggedVideoSendConfig(Timestamp timestamp, const rtclog::StreamConfig config)
-      : timestamp(timestamp), config(config) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  rtclog::StreamConfig config;
-};
 }  // namespace webrtc
 
 #endif  // LOGGING_RTC_EVENT_LOG_EVENTS_RTC_EVENT_VIDEO_SEND_STREAM_CONFIG_H_
diff --git a/logging/rtc_event_log/logged_events.cc b/logging/rtc_event_log/logged_events.cc
deleted file mode 100644
index 5ef3de11c0..0000000000
--- a/logging/rtc_event_log/logged_events.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2019 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-#include "logging/rtc_event_log/logged_events.h"
-
-namespace webrtc {
-
-LoggedPacketInfo::LoggedPacketInfo(const LoggedRtpPacket& rtp,
-                                   LoggedMediaType media_type,
-                                   bool rtx,
-                                   Timestamp capture_time)
-    : ssrc(rtp.header.ssrc),
-      stream_seq_no(rtp.header.sequenceNumber),
-      size(static_cast<uint16_t>(rtp.total_length)),
-      payload_size(static_cast<uint16_t>(rtp.total_length -
-                                         rtp.header.paddingLength -
-                                         rtp.header.headerLength)),
-      padding_size(static_cast<uint16_t>(rtp.header.paddingLength)),
-      payload_type(rtp.header.payloadType),
-      media_type(media_type),
-      rtx(rtx),
-      marker_bit(rtp.header.markerBit),
-      has_transport_seq_no(rtp.header.extension.hasTransportSequenceNumber),
-      transport_seq_no(static_cast<uint16_t>(
-          has_transport_seq_no ? rtp.header.extension.transportSequenceNumber
-                               : 0)),
-      capture_time(capture_time),
-      log_packet_time(Timestamp::Micros(rtp.log_time_us())),
-      reported_send_time(rtp.header.extension.hasAbsoluteSendTime
-                             ? rtp.header.extension.GetAbsoluteSendTimestamp()
-                             : Timestamp::MinusInfinity()) {}
-
-LoggedPacketInfo::LoggedPacketInfo(const LoggedPacketInfo&) = default;
-
-LoggedPacketInfo::~LoggedPacketInfo() {}
-
-LoggedRtcpPacket::LoggedRtcpPacket(Timestamp timestamp,
-                                   const std::vector<uint8_t>& packet)
-    : timestamp(timestamp), raw_data(packet) {}
-
-LoggedRtcpPacket::LoggedRtcpPacket(Timestamp timestamp,
-                                   const std::string& packet)
-    : timestamp(timestamp), raw_data(packet.size()) {
-  memcpy(raw_data.data(), packet.data(), packet.size());
-}
-
-LoggedRtcpPacket::LoggedRtcpPacket(const LoggedRtcpPacket& rhs) = default;
-
-LoggedRtcpPacket::~LoggedRtcpPacket() = default;
-
-}  // namespace webrtc
diff --git a/logging/rtc_event_log/logged_events.h b/logging/rtc_event_log/logged_events.h
index 5bce658c30..d6b3cc607e 100644
--- a/logging/rtc_event_log/logged_events.h
+++ b/logging/rtc_event_log/logged_events.h
@@ -7,337 +7,12 @@
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */
+
 #ifndef LOGGING_RTC_EVENT_LOG_LOGGED_EVENTS_H_
 #define LOGGING_RTC_EVENT_LOG_LOGGED_EVENTS_H_
 
-#include <string>
-#include <vector>
-
-#include "absl/types/optional.h"
-#include "api/rtp_headers.h"
-#include "api/units/time_delta.h"
-#include "api/units/timestamp.h"
-#include "modules/rtp_rtcp/source/rtcp_packet/bye.h"
-#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h"
-#include "modules/rtp_rtcp/source/rtcp_packet/fir.h"
-#include "modules/rtp_rtcp/source/rtcp_packet/loss_notification.h"
-#include "modules/rtp_rtcp/source/rtcp_packet/nack.h"
-#include "modules/rtp_rtcp/source/rtcp_packet/pli.h"
-#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
-#include "modules/rtp_rtcp/source/rtcp_packet/remb.h"
-#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h"
-#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
-
-namespace webrtc {
-
-// The different event types are deliberately POD. Analysis of large logs is
-// already resource intensive. The code simplifications that would be possible
-// possible by having a base class (containing e.g. the log time) are not
-// considered to outweigh the added memory and runtime overhead incurred by
-// adding a vptr.
-
-struct LoggedRtpPacket {
-  LoggedRtpPacket(Timestamp timestamp,
-                  RTPHeader header,
-                  size_t header_length,
-                  size_t total_length)
-      : timestamp(timestamp),
-        header(header),
-        header_length(header_length),
-        total_length(total_length) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp;
-  // TODO(terelius): This allocates space for 15 CSRCs even if none are used.
-  RTPHeader header;
-  size_t header_length;
-  size_t total_length;
-};
-
-struct LoggedRtpPacketIncoming {
-  LoggedRtpPacketIncoming(Timestamp timestamp,
-                          RTPHeader header,
-                          size_t header_length,
-                          size_t total_length)
-      : rtp(timestamp, header, header_length, total_length) {}
-  int64_t log_time_us() const { return rtp.timestamp.us(); }
-  int64_t log_time_ms() const { return rtp.timestamp.ms(); }
-
-  LoggedRtpPacket rtp;
-};
-
-struct LoggedRtpPacketOutgoing {
-  LoggedRtpPacketOutgoing(Timestamp timestamp,
-                          RTPHeader header,
-                          size_t header_length,
-                          size_t total_length)
-      : rtp(timestamp, header, header_length, total_length) {}
-  int64_t log_time_us() const { return rtp.timestamp.us(); }
-  int64_t log_time_ms() const { return rtp.timestamp.ms(); }
-
-  LoggedRtpPacket rtp;
-};
-
-struct LoggedRtcpPacket {
-  LoggedRtcpPacket(Timestamp timestamp, const std::vector<uint8_t>& packet);
-  LoggedRtcpPacket(Timestamp timestamp, const std::string& packet);
-  LoggedRtcpPacket(const LoggedRtcpPacket&);
-  ~LoggedRtcpPacket();
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp;
-  std::vector<uint8_t> raw_data;
-};
-
-struct LoggedRtcpPacketIncoming {
-  LoggedRtcpPacketIncoming(Timestamp timestamp,
-                           const std::vector<uint8_t>& packet)
-      : rtcp(timestamp, packet) {}
-  LoggedRtcpPacketIncoming(Timestamp timestamp, const std::string& packet)
-      : rtcp(timestamp, packet) {}
-
-  int64_t log_time_us() const { return rtcp.timestamp.us(); }
-  int64_t log_time_ms() const { return rtcp.timestamp.ms(); }
-
-  LoggedRtcpPacket rtcp;
-};
-
-struct LoggedRtcpPacketOutgoing {
-  LoggedRtcpPacketOutgoing(Timestamp timestamp,
-                           const std::vector<uint8_t>& packet)
-      : rtcp(timestamp, packet) {}
-  LoggedRtcpPacketOutgoing(Timestamp timestamp, const std::string& packet)
-      : rtcp(timestamp, packet) {}
-
-  int64_t log_time_us() const { return rtcp.timestamp.us(); }
-  int64_t log_time_ms() const { return rtcp.timestamp.ms(); }
-
-  LoggedRtcpPacket rtcp;
-};
-
-struct LoggedRtcpPacketReceiverReport {
-  LoggedRtcpPacketReceiverReport() = default;
-  LoggedRtcpPacketReceiverReport(Timestamp timestamp,
-                                 const rtcp::ReceiverReport& rr)
-      : timestamp(timestamp), rr(rr) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  rtcp::ReceiverReport rr;
-};
-
-struct LoggedRtcpPacketSenderReport {
-  LoggedRtcpPacketSenderReport() = default;
-  LoggedRtcpPacketSenderReport(Timestamp timestamp,
-                               const rtcp::SenderReport& sr)
-      : timestamp(timestamp), sr(sr) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  rtcp::SenderReport sr;
-};
-
-struct LoggedRtcpPacketExtendedReports {
-  LoggedRtcpPacketExtendedReports() = default;
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  rtcp::ExtendedReports xr;
-};
-
-struct LoggedRtcpPacketRemb {
-  LoggedRtcpPacketRemb() = default;
-  LoggedRtcpPacketRemb(Timestamp timestamp, const rtcp::Remb& remb)
-      : timestamp(timestamp), remb(remb) {}
-
-  int64_t log_time_us() const { return timestamp.us(); }
-  int64_t log_time_ms() const { return timestamp.ms(); }
-
-  Timestamp timestamp = Timestamp::MinusInfinity();
-  rtcp::Remb remb;
-};
-
-struct LoggedRtcpPacketNack {
-  LoggedRtcpPacketNack() = 
default; - LoggedRtcpPacketNack(Timestamp timestamp, const rtcp::Nack& nack) - : timestamp(timestamp), nack(nack) {} - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp timestamp = Timestamp::MinusInfinity(); - rtcp::Nack nack; -}; - -struct LoggedRtcpPacketFir { - LoggedRtcpPacketFir() = default; - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp timestamp = Timestamp::MinusInfinity(); - rtcp::Fir fir; -}; - -struct LoggedRtcpPacketPli { - LoggedRtcpPacketPli() = default; - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp timestamp = Timestamp::MinusInfinity(); - rtcp::Pli pli; -}; - -struct LoggedRtcpPacketTransportFeedback { - LoggedRtcpPacketTransportFeedback() - : transport_feedback(/*include_timestamps=*/true, /*include_lost*/ true) { - } - LoggedRtcpPacketTransportFeedback( - Timestamp timestamp, - const rtcp::TransportFeedback& transport_feedback) - : timestamp(timestamp), transport_feedback(transport_feedback) {} - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp timestamp = Timestamp::MinusInfinity(); - rtcp::TransportFeedback transport_feedback; -}; - -struct LoggedRtcpPacketLossNotification { - LoggedRtcpPacketLossNotification() = default; - LoggedRtcpPacketLossNotification( - Timestamp timestamp, - const rtcp::LossNotification& loss_notification) - : timestamp(timestamp), loss_notification(loss_notification) {} - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp timestamp = Timestamp::MinusInfinity(); - rtcp::LossNotification loss_notification; -}; - -struct LoggedRtcpPacketBye { - LoggedRtcpPacketBye() = default; - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp timestamp = Timestamp::MinusInfinity(); - rtcp::Bye bye; -}; - -struct LoggedStartEvent { - explicit LoggedStartEvent(Timestamp timestamp) - : LoggedStartEvent(timestamp, timestamp) {} - - LoggedStartEvent(Timestamp timestamp, Timestamp utc_start_time) - : timestamp(timestamp), utc_start_time(utc_start_time) {} - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp utc_time() const { return utc_start_time; } - - Timestamp timestamp; - Timestamp utc_start_time; -}; - -struct LoggedStopEvent { - explicit LoggedStopEvent(Timestamp timestamp) : timestamp(timestamp) {} - - int64_t log_time_us() const { return timestamp.us(); } - int64_t log_time_ms() const { return timestamp.ms(); } - - Timestamp timestamp; -}; - -struct InferredRouteChangeEvent { - int64_t log_time_ms() const { return log_time.ms(); } - int64_t log_time_us() const { return log_time.us(); } - uint32_t route_id; - Timestamp log_time = Timestamp::MinusInfinity(); - uint16_t send_overhead; - uint16_t return_overhead; -}; - -enum class LoggedMediaType : uint8_t { kUnknown, kAudio, kVideo }; - -struct LoggedPacketInfo { - LoggedPacketInfo(const LoggedRtpPacket& rtp, - LoggedMediaType media_type, - bool rtx, - Timestamp capture_time); - LoggedPacketInfo(const LoggedPacketInfo&); - ~LoggedPacketInfo(); - int64_t log_time_ms() const { return log_packet_time.ms(); } - int64_t log_time_us() const { return log_packet_time.us(); } 
- uint32_t ssrc; - uint16_t stream_seq_no; - uint16_t size; - uint16_t payload_size; - uint16_t padding_size; - uint16_t overhead = 0; - uint8_t payload_type; - LoggedMediaType media_type = LoggedMediaType::kUnknown; - bool rtx = false; - bool marker_bit = false; - bool has_transport_seq_no = false; - bool last_in_feedback = false; - uint16_t transport_seq_no = 0; - // The RTP header timestamp unwrapped and converted from tick count to seconds - // based timestamp. - Timestamp capture_time; - // The time the packet was logged. This is the receive time for incoming - // packets and send time for outgoing. - Timestamp log_packet_time; - // Send time as reported by abs-send-time extension, For outgoing packets this - // corresponds to log_packet_time, but might be measured using another clock. - Timestamp reported_send_time; - // The receive time that was reported in feedback. For incoming packets this - // corresponds to log_packet_time, but might be measured using another clock. - // PlusInfinity indicates that the packet was lost. - Timestamp reported_recv_time = Timestamp::MinusInfinity(); - // The time feedback message was logged. This is the feedback send time for - // incoming packets and feedback receive time for outgoing. - // PlusInfinity indicates that feedback was expected but not received. - Timestamp log_feedback_time = Timestamp::MinusInfinity(); - // The delay betweeen receiving an RTP packet and sending feedback for - // incoming packets. For outgoing packets we don't know the feedback send - // time, and this is instead calculated as the difference in reported receive - // time between this packet and the last packet in the same feedback message. - TimeDelta feedback_hold_duration = TimeDelta::MinusInfinity(); -}; - -enum class LoggedIceEventType { - kAdded, - kUpdated, - kDestroyed, - kSelected, - kCheckSent, - kCheckReceived, - kCheckResponseSent, - kCheckResponseReceived, -}; - -struct LoggedIceEvent { - uint32_t candidate_pair_id; - Timestamp log_time; - LoggedIceEventType event_type; -}; - +// TODO(terelius): Delete this forwarding header when downstream +// projects have been updated. +#include "logging/rtc_event_log/events/logged_rtp_rtcp.h" -} // namespace webrtc #endif // LOGGING_RTC_EVENT_LOG_LOGGED_EVENTS_H_ diff --git a/logging/rtc_event_log/rtc_event_log2.proto b/logging/rtc_event_log/rtc_event_log2.proto index 4ff3206439..a541533dcc 100644 --- a/logging/rtc_event_log/rtc_event_log2.proto +++ b/logging/rtc_event_log/rtc_event_log2.proto @@ -144,7 +144,7 @@ message IncomingRtpPackets { optional int32 transmission_time_offset = 16; optional uint32 absolute_send_time = 17; optional uint32 video_rotation = 18; - // |audio_level| and |voice_activity| are always used in conjunction. + // `audio_level` and `voice_activity` are always used in conjunction. optional uint32 audio_level = 19; optional bool voice_activity = 20; // TODO(terelius): Add other header extensions like playout delay? @@ -165,7 +165,7 @@ message IncomingRtpPackets { optional bytes transmission_time_offset_deltas = 116; optional bytes absolute_send_time_deltas = 117; optional bytes video_rotation_deltas = 118; - // |audio_level| and |voice_activity| are always used in conjunction. + // `audio_level` and `voice_activity` are always used in conjunction. 
optional bytes audio_level_deltas = 119; optional bytes voice_activity_deltas = 120; } @@ -212,7 +212,7 @@ message OutgoingRtpPackets { optional int32 transmission_time_offset = 16; optional uint32 absolute_send_time = 17; optional uint32 video_rotation = 18; - // |audio_level| and |voice_activity| are always used in conjunction. + // `audio_level` and `voice_activity` are always used in conjunction. optional uint32 audio_level = 19; optional bool voice_activity = 20; // TODO(terelius): Add other header extensions like playout delay? @@ -233,7 +233,7 @@ message OutgoingRtpPackets { optional bytes transmission_time_offset_deltas = 116; optional bytes absolute_send_time_deltas = 117; optional bytes video_rotation_deltas = 118; - // |audio_level| and |voice_activity| are always used in conjunction. + // `audio_level` and `voice_activity` are always used in conjunction. optional bytes audio_level_deltas = 119; optional bytes voice_activity_deltas = 120; } diff --git a/logging/rtc_event_log/rtc_event_log2rtp_dump.cc b/logging/rtc_event_log/rtc_event_log2rtp_dump.cc index c9d4a6ca88..93cd4652cc 100644 --- a/logging/rtc_event_log/rtc_event_log2rtp_dump.cc +++ b/logging/rtc_event_log/rtc_event_log2rtp_dump.cc @@ -70,7 +70,7 @@ namespace { using MediaType = webrtc::ParsedRtcEventLog::MediaType; // Parses the input string for a valid SSRC. If a valid SSRC is found, it is -// written to the output variable |ssrc|, and true is returned. Otherwise, +// written to the output variable `ssrc`, and true is returned. Otherwise, // false is returned. // The empty string must be validated as true, because it is the default value // of the command-line flag. In this case, no value is written to the output @@ -240,7 +240,7 @@ int main(int argc, char* argv[]) { continue; event_processor.AddEvents(stream.incoming_packets, handle_rtp); } - // Note that |packet_ssrc| is the sender SSRC. An RTCP message may contain + // Note that `packet_ssrc` is the sender SSRC. An RTCP message may contain // report blocks for many streams, thus several SSRCs and they don't // necessarily have to be of the same media type. We therefore don't // support filtering of RTCP based on SSRC and media type. 
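Editor's note on the hunk above: the comment it touches documents the SSRC-filter contract for the rtp_dump conversion tool, where an empty string must count as "valid" because it is the command-line flag's default and means "no filter". A minimal sketch of a parser honoring that contract follows; ParseSsrc here is a hypothetical stand-in, not the exact helper in rtc_event_log2rtp_dump.cc.

    #include <cstdint>
    #include <cstdlib>
    #include <string>

    // Returns true and writes `*ssrc` if `str` holds a valid 32-bit SSRC.
    // Returns true without writing anything if `str` is empty (the flag
    // default, meaning "no filter"); returns false otherwise.
    bool ParseSsrc(const std::string& str, uint32_t* ssrc) {
      if (str.empty())
        return true;
      // Accept decimal as well as "0x"-prefixed hexadecimal input.
      const int base = str.compare(0, 2, "0x") == 0 ? 16 : 10;
      char* end = nullptr;
      const unsigned long long value = std::strtoull(str.c_str(), &end, base);
      if (end == str.c_str() || *end != '\0' || value > 0xFFFFFFFFull)
        return false;  // Nothing parsed, trailing garbage, or out of range.
      *ssrc = static_cast<uint32_t>(value);
      return true;
    }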
diff --git a/logging/rtc_event_log/rtc_event_log_impl.cc b/logging/rtc_event_log/rtc_event_log_impl.cc index 700f639311..0766b4a884 100644 --- a/logging/rtc_event_log/rtc_event_log_impl.cc +++ b/logging/rtc_event_log/rtc_event_log_impl.cc @@ -22,7 +22,6 @@ #include "logging/rtc_event_log/encoder/rtc_event_log_encoder_legacy.h" #include "logging/rtc_event_log/encoder/rtc_event_log_encoder_new_format.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/event.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" @@ -40,15 +39,15 @@ std::unique_ptr CreateEncoder( RtcEventLog::EncodingType type) { switch (type) { case RtcEventLog::EncodingType::Legacy: - RTC_LOG(LS_INFO) << "Creating legacy encoder for RTC event log."; + RTC_DLOG(LS_INFO) << "Creating legacy encoder for RTC event log."; return std::make_unique(); case RtcEventLog::EncodingType::NewFormat: - RTC_LOG(LS_INFO) << "Creating new format encoder for RTC event log."; + RTC_DLOG(LS_INFO) << "Creating new format encoder for RTC event log."; return std::make_unique(); default: RTC_LOG(LS_ERROR) << "Unknown RtcEventLog encoder type (" << int(type) << ")"; - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return std::unique_ptr(nullptr); } } @@ -92,13 +91,12 @@ bool RtcEventLogImpl::StartLogging(std::unique_ptr output, const int64_t timestamp_us = rtc::TimeMillis() * 1000; const int64_t utc_time_us = rtc::TimeUTCMillis() * 1000; - RTC_LOG(LS_INFO) << "Starting WebRTC event log. (Timestamp, UTC) = " - "(" + RTC_LOG(LS_INFO) << "Starting WebRTC event log. (Timestamp, UTC) = (" << timestamp_us << ", " << utc_time_us << ")."; RTC_DCHECK_RUN_ON(&logging_state_checker_); logging_state_started_ = true; - // Binding to |this| is safe because |this| outlives the |task_queue_|. + // Binding to `this` is safe because `this` outlives the `task_queue_`. task_queue_->PostTask([this, output_period_ms, timestamp_us, utc_time_us, output = std::move(output)]() mutable { RTC_DCHECK_RUN_ON(task_queue_.get()); @@ -114,15 +112,15 @@ bool RtcEventLogImpl::StartLogging(std::unique_ptr output, } void RtcEventLogImpl::StopLogging() { - RTC_LOG(LS_INFO) << "Stopping WebRTC event log."; + RTC_DLOG(LS_INFO) << "Stopping WebRTC event log."; // TODO(danilchap): Do not block current thread waiting on the task queue. // It might work for now, for current callers, but disallows caller to share - // threads with the |task_queue_|. + // threads with the `task_queue_`. rtc::Event output_stopped; StopLogging([&output_stopped]() { output_stopped.Set(); }); output_stopped.Wait(rtc::Event::kForever); - RTC_LOG(LS_INFO) << "WebRTC event log successfully stopped."; + RTC_DLOG(LS_INFO) << "WebRTC event log successfully stopped."; } void RtcEventLogImpl::StopLogging(std::function callback) { @@ -142,7 +140,7 @@ void RtcEventLogImpl::StopLogging(std::function callback) { void RtcEventLogImpl::Log(std::unique_ptr event) { RTC_CHECK(event); - // Binding to |this| is safe because |this| outlives the |task_queue_|. + // Binding to `this` is safe because `this` outlives the `task_queue_`. 
task_queue_->PostTask([this, event = std::move(event)]() mutable { RTC_DCHECK_RUN_ON(task_queue_.get()); LogToMemory(std::move(event)); @@ -162,7 +160,7 @@ void RtcEventLogImpl::ScheduleOutput() { RTC_DCHECK(output_period_ms_.has_value()); if (*output_period_ms_ == kImmediateOutput) { - // We are already on the |task_queue_| so there is no reason to post a task + // We are already on the `task_queue_` so there is no reason to post a task // if we want to output immediately. LogEventsFromMemoryToOutput(); return; @@ -170,7 +168,7 @@ void RtcEventLogImpl::ScheduleOutput() { if (!output_scheduled_) { output_scheduled_ = true; - // Binding to |this| is safe because |this| outlives the |task_queue_|. + // Binding to `this` is safe because `this` outlives the `task_queue_`. auto output_task = [this]() { RTC_DCHECK_RUN_ON(task_queue_.get()); if (event_output_) { @@ -205,7 +203,7 @@ void RtcEventLogImpl::LogEventsFromMemoryToOutput() { last_output_ms_ = rtc::TimeMillis(); // Serialize all stream configurations that haven't already been written to - // this output. |num_config_events_written_| is used to track which configs we + // this output. `num_config_events_written_` is used to track which configs we // have already written. (Note that the config may have been written to // previous outputs; configs are not discarded.) std::string encoded_configs; diff --git a/logging/rtc_event_log/rtc_event_log_impl.h b/logging/rtc_event_log/rtc_event_log_impl.h index 0b6a71b24b..61e90d139d 100644 --- a/logging/rtc_event_log/rtc_event_log_impl.h +++ b/logging/rtc_event_log/rtc_event_log_impl.h @@ -82,8 +82,8 @@ class RtcEventLogImpl final : public RtcEventLog { RTC_NO_UNIQUE_ADDRESS SequenceChecker logging_state_checker_; bool logging_state_started_ RTC_GUARDED_BY(logging_state_checker_); - // Since we are posting tasks bound to |this|, it is critical that the event - // log and its members outlive |task_queue_|. Keep the |task_queue_| + // Since we are posting tasks bound to `this`, it is critical that the event + // log and its members outlive `task_queue_`. Keep the `task_queue_` // last to ensure it destructs first, or else tasks living on the queue might // access other members after they've been torn down. 
std::unique_ptr task_queue_; diff --git a/logging/rtc_event_log/rtc_event_log_parser.cc b/logging/rtc_event_log/rtc_event_log_parser.cc index 08fb9408c1..f0d165bdb2 100644 --- a/logging/rtc_event_log/rtc_event_log_parser.cc +++ b/logging/rtc_event_log/rtc_event_log_parser.cc @@ -28,6 +28,7 @@ #include "logging/rtc_event_log/encoder/delta_encoding.h" #include "logging/rtc_event_log/encoder/rtc_event_log_encoder_common.h" #include "logging/rtc_event_log/encoder/var_int.h" +#include "logging/rtc_event_log/events/logged_rtp_rtcp.h" #include "logging/rtc_event_log/rtc_event_processor.h" #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h" #include "modules/include/module_common_types_public.h" @@ -35,7 +36,7 @@ #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/byte_io.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" -#include "modules/rtp_rtcp/source/rtp_utility.h" +#include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" @@ -53,6 +54,12 @@ return ParsedRtcEventLog::ParseStatus::Error(#X, __FILE__, __LINE__); \ } while (0) +#define RTC_PARSE_CHECK_OR_RETURN_MESSAGE(X, M) \ + do { \ + if (!(X)) \ + return ParsedRtcEventLog::ParseStatus::Error((M), __FILE__, __LINE__); \ + } while (0) + #define RTC_PARSE_CHECK_OR_RETURN_OP(OP, X, Y) \ do { \ if (!((X)OP(Y))) \ @@ -174,7 +181,7 @@ RtcpMode GetRuntimeRtcpMode(rtclog::VideoReceiveConfig::RtcpMode rtcp_mode) { case rtclog::VideoReceiveConfig::RTCP_REDUCEDSIZE: return RtcpMode::kReducedSize; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return RtcpMode::kOff; } @@ -188,7 +195,7 @@ BandwidthUsage GetRuntimeDetectorState( case rtclog::DelayBasedBweUpdate::BWE_OVERUSING: return BandwidthUsage::kBwOverusing; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return BandwidthUsage::kBwNormal; } @@ -204,7 +211,7 @@ IceCandidatePairConfigType GetRuntimeIceCandidatePairConfigType( case rtclog::IceCandidatePairConfig::SELECTED: return IceCandidatePairConfigType::kSelected; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidatePairConfigType::kAdded; } @@ -222,7 +229,7 @@ IceCandidateType GetRuntimeIceCandidateType( case rtclog::IceCandidatePairConfig::UNKNOWN_CANDIDATE_TYPE: return IceCandidateType::kUnknown; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidateType::kUnknown; } @@ -240,7 +247,7 @@ IceCandidatePairProtocol GetRuntimeIceCandidatePairProtocol( case rtclog::IceCandidatePairConfig::UNKNOWN_PROTOCOL: return IceCandidatePairProtocol::kUnknown; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidatePairProtocol::kUnknown; } @@ -254,7 +261,7 @@ IceCandidatePairAddressFamily GetRuntimeIceCandidatePairAddressFamily( case rtclog::IceCandidatePairConfig::UNKNOWN_ADDRESS_FAMILY: return IceCandidatePairAddressFamily::kUnknown; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidatePairAddressFamily::kUnknown; } @@ -274,7 +281,7 @@ IceCandidateNetworkType GetRuntimeIceCandidateNetworkType( case rtclog::IceCandidatePairConfig::UNKNOWN_NETWORK_TYPE: return IceCandidateNetworkType::kUnknown; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidateNetworkType::kUnknown; } @@ -290,7 +297,7 @@ IceCandidatePairEventType GetRuntimeIceCandidatePairEventType( case rtclog::IceCandidatePairEvent::CHECK_RESPONSE_RECEIVED: return IceCandidatePairEventType::kCheckResponseReceived; } - RTC_NOTREACHED(); + 
RTC_DCHECK_NOTREACHED(); return IceCandidatePairEventType::kCheckSent; } @@ -311,7 +318,7 @@ VideoCodecType GetRuntimeCodecType(rtclog2::FrameDecodedEvents::Codec codec) { "VideoCodecType::kVideoCodecMultiplex"; return VideoCodecType::kVideoCodecMultiplex; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return VideoCodecType::kVideoCodecMultiplex; } @@ -768,7 +775,7 @@ BandwidthUsage GetRuntimeDetectorState( case rtclog2::DelayBasedBweUpdates::BWE_UNKNOWN_STATE: break; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return BandwidthUsage::kBwNormal; } @@ -784,7 +791,7 @@ ProbeFailureReason GetRuntimeProbeFailureReason( case rtclog2::BweProbeResultFailure::UNKNOWN: break; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return ProbeFailureReason::kTimeout; } @@ -802,10 +809,10 @@ DtlsTransportState GetRuntimeDtlsTransportState( case rtclog2::DtlsTransportStateEvent::DTLS_TRANSPORT_FAILED: return DtlsTransportState::kFailed; case rtclog2::DtlsTransportStateEvent::UNKNOWN_DTLS_TRANSPORT_STATE: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return DtlsTransportState::kNumValues; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return DtlsTransportState::kNumValues; } @@ -823,7 +830,7 @@ IceCandidatePairConfigType GetRuntimeIceCandidatePairConfigType( case rtclog2::IceCandidatePairConfig::UNKNOWN_CONFIG_TYPE: break; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidatePairConfigType::kAdded; } @@ -841,7 +848,7 @@ IceCandidateType GetRuntimeIceCandidateType( case rtclog2::IceCandidatePairConfig::UNKNOWN_CANDIDATE_TYPE: return IceCandidateType::kUnknown; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidateType::kUnknown; } @@ -859,7 +866,7 @@ IceCandidatePairProtocol GetRuntimeIceCandidatePairProtocol( case rtclog2::IceCandidatePairConfig::UNKNOWN_PROTOCOL: return IceCandidatePairProtocol::kUnknown; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidatePairProtocol::kUnknown; } @@ -873,7 +880,7 @@ IceCandidatePairAddressFamily GetRuntimeIceCandidatePairAddressFamily( case rtclog2::IceCandidatePairConfig::UNKNOWN_ADDRESS_FAMILY: return IceCandidatePairAddressFamily::kUnknown; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidatePairAddressFamily::kUnknown; } @@ -893,7 +900,7 @@ IceCandidateNetworkType GetRuntimeIceCandidateNetworkType( case rtclog2::IceCandidatePairConfig::UNKNOWN_NETWORK_TYPE: return IceCandidateNetworkType::kUnknown; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidateNetworkType::kUnknown; } @@ -911,7 +918,7 @@ IceCandidatePairEventType GetRuntimeIceCandidatePairEventType( case rtclog2::IceCandidatePairEvent::UNKNOWN_CHECK_TYPE: break; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return IceCandidatePairEventType::kCheckSent; } @@ -945,6 +952,35 @@ std::vector GetRuntimeRtpHeaderExtensionConfig( } // End of conversion functions. +LoggedPacketInfo::LoggedPacketInfo(const LoggedRtpPacket& rtp, + LoggedMediaType media_type, + bool rtx, + Timestamp capture_time) + : ssrc(rtp.header.ssrc), + stream_seq_no(rtp.header.sequenceNumber), + size(static_cast(rtp.total_length)), + payload_size(static_cast(rtp.total_length - + rtp.header.paddingLength - + rtp.header.headerLength)), + padding_size(static_cast(rtp.header.paddingLength)), + payload_type(rtp.header.payloadType), + media_type(media_type), + rtx(rtx), + marker_bit(rtp.header.markerBit), + has_transport_seq_no(rtp.header.extension.hasTransportSequenceNumber), + transport_seq_no(static_cast( + has_transport_seq_no ? 
rtp.header.extension.transportSequenceNumber + : 0)), + capture_time(capture_time), + log_packet_time(Timestamp::Micros(rtp.log_time_us())), + reported_send_time(rtp.header.extension.hasAbsoluteSendTime + ? rtp.header.extension.GetAbsoluteSendTimestamp() + : Timestamp::MinusInfinity()) {} + +LoggedPacketInfo::LoggedPacketInfo(const LoggedPacketInfo&) = default; + +LoggedPacketInfo::~LoggedPacketInfo() {} + ParsedRtcEventLog::~ParsedRtcEventLog() = default; ParsedRtcEventLog::LoggedRtpStreamIncoming::LoggedRtpStreamIncoming() = default; @@ -1171,24 +1207,24 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::ParseStream( const int64_t timestamp_us = incoming.rtcp.timestamp.us(); const uint8_t* packet_begin = incoming.rtcp.raw_data.data(); const uint8_t* packet_end = packet_begin + incoming.rtcp.raw_data.size(); - auto status = StoreRtcpBlocks( + auto store_rtcp_status = StoreRtcpBlocks( timestamp_us, packet_begin, packet_end, &incoming_sr_, &incoming_rr_, &incoming_xr_, &incoming_remb_, &incoming_nack_, &incoming_fir_, &incoming_pli_, &incoming_bye_, &incoming_transport_feedback_, &incoming_loss_notification_); - RTC_RETURN_IF_ERROR(status); + RTC_RETURN_IF_ERROR(store_rtcp_status); } for (const auto& outgoing : outgoing_rtcp_packets_) { const int64_t timestamp_us = outgoing.rtcp.timestamp.us(); const uint8_t* packet_begin = outgoing.rtcp.raw_data.data(); const uint8_t* packet_end = packet_begin + outgoing.rtcp.raw_data.size(); - auto status = StoreRtcpBlocks( + auto store_rtcp_status = StoreRtcpBlocks( timestamp_us, packet_begin, packet_end, &outgoing_sr_, &outgoing_rr_, &outgoing_xr_, &outgoing_remb_, &outgoing_nack_, &outgoing_fir_, &outgoing_pli_, &outgoing_bye_, &outgoing_transport_feedback_, &outgoing_loss_notification_); - RTC_RETURN_IF_ERROR(status); + RTC_RETURN_IF_ERROR(store_rtcp_status); } // Store first and last timestamp events that might happen before the call is @@ -1267,18 +1303,34 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::ParseStream( ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::ParseStreamInternal( absl::string_view s) { constexpr uint64_t kMaxEventSize = 10000000; // Sanity check. + // Protobuf defines the message tag as + // (field_number << 3) | wire_type. In the legacy encoding, the field number + // is supposed to be 1 and the wire type for a length-delimited field is 2. + // In the new encoding we still expect the wire type to be 2, but the field + // number will be greater than 1. + constexpr uint64_t kExpectedV1Tag = (1 << 3) | 2; + bool success = false; + + // "Peek" at the first varint. + absl::string_view event_start = s; + uint64_t tag = 0; + std::tie(success, std::ignore) = DecodeVarInt(s, &tag); + if (!success) { + RTC_LOG(LS_WARNING) << "Failed to read varint from beginning of event log."; + RTC_PARSE_WARN_AND_RETURN_SUCCESS_IF(allow_incomplete_logs_, + kIncompleteLogError); + return ParseStatus::Error("Failed to read field tag varint", __FILE__, + __LINE__); + } + s = event_start; + + if (tag >> 1 == static_cast(RtcEvent::Type::BeginV3Log)) { + return ParseStreamInternalV3(s); + } while (!s.empty()) { - absl::string_view event_start = s; - bool success = false; - - // Read the next message tag. Protobuf defines the message tag as - // (field_number << 3) | wire_type. In the legacy encoding, the field number - // is supposed to be 1 and the wire type for a length-delimited field is 2. - // In the new encoding we still expect the wire type to be 2, but the field - // number will be greater than 1. 
- constexpr uint64_t kExpectedV1Tag = (1 << 3) | 2; - uint64_t tag = 0; + // If not, "reset" event_start and read the field tag for the next event. + event_start = s; std::tie(success, s) = DecodeVarInt(s, &tag); if (!success) { RTC_LOG(LS_WARNING) @@ -1288,6 +1340,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::ParseStreamInternal( return ParseStatus::Error("Failed to read field tag varint", __FILE__, __LINE__); } + constexpr uint64_t kWireTypeMask = 0x07; const uint64_t wire_type = tag & kWireTypeMask; if (wire_type != 2) { @@ -1311,12 +1364,17 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::ParseStreamInternal( } if (message_length > s.size()) { - RTC_LOG(LS_WARNING) << "Protobuf message length is too large."; + RTC_LOG(LS_WARNING) << "Protobuf message length is larger than the " + "remaining bytes in the proto."; RTC_PARSE_WARN_AND_RETURN_SUCCESS_IF(allow_incomplete_logs_, kIncompleteLogError); - RTC_PARSE_CHECK_OR_RETURN_LE(message_length, kMaxEventSize); + return ParseStatus::Error( + "Incomplete message: the length of the next message is larger than " + "the remaining bytes in the proto", + __FILE__, __LINE__); } + RTC_PARSE_CHECK_OR_RETURN_LE(message_length, kMaxEventSize); // Skip forward to the start of the next event. s = s.substr(message_length); size_t total_event_size = event_start.size() - s.size(); @@ -1352,6 +1410,155 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::ParseStreamInternal( return ParseStatus::Success(); } +ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::ParseStreamInternalV3( + absl::string_view s) { + constexpr uint64_t kMaxEventSize = 10000000; // Sanity check. + bool expect_begin_log_event = true; + bool success = false; + + while (!s.empty()) { + // Read event type. + uint64_t event_tag = 0; + std::tie(success, s) = DecodeVarInt(s, &event_tag); + RTC_PARSE_CHECK_OR_RETURN_MESSAGE(success, "Failed to read event type."); + bool batched = event_tag & 1; + uint64_t event_type = event_tag >> 1; + + // Read event size + uint64_t event_size_bytes = 0; + std::tie(success, s) = DecodeVarInt(s, &event_size_bytes); + RTC_PARSE_CHECK_OR_RETURN_MESSAGE(success, "Failed to read event size."); + if (event_size_bytes > kMaxEventSize || event_size_bytes > s.size()) { + RTC_LOG(LS_WARNING) << "Event size is too large."; + RTC_PARSE_CHECK_OR_RETURN_LE(event_size_bytes, kMaxEventSize); + RTC_PARSE_CHECK_OR_RETURN_LE(event_size_bytes, s.size()); + } + + // Read remaining event fields into a buffer. 
+ absl::string_view event_fields = s.substr(0, event_size_bytes); + s = s.substr(event_size_bytes); + + if (expect_begin_log_event) { + RTC_PARSE_CHECK_OR_RETURN_EQ( + event_type, static_cast(RtcEvent::Type::BeginV3Log)); + expect_begin_log_event = false; + } + + switch (event_type) { + case static_cast(RtcEvent::Type::BeginV3Log): + RtcEventBeginLog::Parse(event_fields, batched, start_log_events_); + break; + case static_cast(RtcEvent::Type::EndV3Log): + RtcEventEndLog::Parse(event_fields, batched, stop_log_events_); + expect_begin_log_event = true; + break; + case static_cast(RtcEvent::Type::AlrStateEvent): + RtcEventAlrState::Parse(event_fields, batched, alr_state_events_); + break; + case static_cast(RtcEvent::Type::AudioPlayout): + RtcEventAudioPlayout::Parse(event_fields, batched, + audio_playout_events_); + break; + case static_cast(RtcEvent::Type::BweUpdateDelayBased): + RtcEventBweUpdateDelayBased::Parse(event_fields, batched, + bwe_delay_updates_); + break; + case static_cast(RtcEvent::Type::AudioNetworkAdaptation): + RtcEventAudioNetworkAdaptation::Parse(event_fields, batched, + audio_network_adaptation_events_); + break; + case static_cast(RtcEvent::Type::AudioReceiveStreamConfig): + RtcEventAudioReceiveStreamConfig::Parse(event_fields, batched, + audio_recv_configs_); + break; + case static_cast(RtcEvent::Type::AudioSendStreamConfig): + RtcEventAudioSendStreamConfig::Parse(event_fields, batched, + audio_send_configs_); + break; + case static_cast(RtcEvent::Type::BweUpdateLossBased): + RtcEventBweUpdateLossBased::Parse(event_fields, batched, + bwe_loss_updates_); + break; + case static_cast(RtcEvent::Type::DtlsTransportState): + RtcEventDtlsTransportState::Parse(event_fields, batched, + dtls_transport_states_); + break; + case static_cast(RtcEvent::Type::DtlsWritableState): + RtcEventDtlsWritableState::Parse(event_fields, batched, + dtls_writable_states_); + break; + case static_cast(RtcEvent::Type::FrameDecoded): + RtcEventFrameDecoded::Parse(event_fields, batched, decoded_frames_); + break; + case static_cast(RtcEvent::Type::GenericAckReceived): + RtcEventGenericAckReceived::Parse(event_fields, batched, + generic_acks_received_); + break; + case static_cast(RtcEvent::Type::GenericPacketReceived): + RtcEventGenericPacketReceived::Parse(event_fields, batched, + generic_packets_received_); + break; + case static_cast(RtcEvent::Type::GenericPacketSent): + RtcEventGenericPacketSent::Parse(event_fields, batched, + generic_packets_sent_); + break; + case static_cast(RtcEvent::Type::IceCandidatePairConfig): + RtcEventIceCandidatePairConfig::Parse(event_fields, batched, + ice_candidate_pair_configs_); + break; + case static_cast(RtcEvent::Type::IceCandidatePairEvent): + RtcEventIceCandidatePair::Parse(event_fields, batched, + ice_candidate_pair_events_); + break; + case static_cast(RtcEvent::Type::ProbeClusterCreated): + RtcEventProbeClusterCreated::Parse(event_fields, batched, + bwe_probe_cluster_created_events_); + break; + case static_cast(RtcEvent::Type::ProbeResultFailure): + RtcEventProbeResultFailure::Parse(event_fields, batched, + bwe_probe_failure_events_); + break; + case static_cast(RtcEvent::Type::ProbeResultSuccess): + RtcEventProbeResultSuccess::Parse(event_fields, batched, + bwe_probe_success_events_); + break; + case static_cast(RtcEvent::Type::RemoteEstimateEvent): + RtcEventRemoteEstimate::Parse(event_fields, batched, + remote_estimate_events_); + break; + case static_cast(RtcEvent::Type::RouteChangeEvent): + RtcEventRouteChange::Parse(event_fields, batched, 
route_change_events_);
+        break;
+      case static_cast<uint32_t>(RtcEvent::Type::RtcpPacketIncoming):
+        RtcEventRtcpPacketIncoming::Parse(event_fields, batched,
+                                          incoming_rtcp_packets_);
+        break;
+      case static_cast<uint32_t>(RtcEvent::Type::RtcpPacketOutgoing):
+        RtcEventRtcpPacketOutgoing::Parse(event_fields, batched,
+                                          outgoing_rtcp_packets_);
+        break;
+      case static_cast<uint32_t>(RtcEvent::Type::RtpPacketIncoming):
+        RtcEventRtpPacketIncoming::Parse(event_fields, batched,
+                                         incoming_rtp_packets_map_);
+        break;
+      case static_cast<uint32_t>(RtcEvent::Type::RtpPacketOutgoing):
+        RtcEventRtpPacketOutgoing::Parse(event_fields, batched,
+                                         outgoing_rtp_packets_map_);
+        break;
+      case static_cast<uint32_t>(RtcEvent::Type::VideoReceiveStreamConfig):
+        RtcEventVideoReceiveStreamConfig::Parse(event_fields, batched,
+                                                video_recv_configs_);
+        break;
+      case static_cast<uint32_t>(RtcEvent::Type::VideoSendStreamConfig):
+        RtcEventVideoSendStreamConfig::Parse(event_fields, batched,
+                                             video_send_configs_);
+        break;
+    }
+  }
+
+  return ParseStatus::Success();
+}
+
 template <typename T>
 void ParsedRtcEventLog::StoreFirstAndLastTimestamp(const std::vector<T>& v) {
   if (v.empty())
@@ -1363,7 +1570,6 @@ void ParsedRtcEventLog::StoreFirstAndLastTimestamp(const std::vector<T>& v) {
 ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreParsedLegacyEvent(
     const rtclog::Event& event) {
   RTC_PARSE_CHECK_OR_RETURN(event.has_type());
-  RTC_PARSE_CHECK_OR_RETURN(event.has_type());
   switch (event.type()) {
     case rtclog::Event::VIDEO_RECEIVER_CONFIG_EVENT: {
       auto config = GetVideoReceiveConfig(event);
@@ -1421,40 +1627,45 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreParsedLegacyEvent(
       break;
     }
     case rtclog::Event::RTP_EVENT: {
-      PacketDirection direction;
-      uint8_t header[IP_PACKET_SIZE];
-      size_t header_length;
-      size_t total_length;
-      ParseStatus status = GetRtpHeader(event, &direction, header,
-                                        &header_length, &total_length, nullptr);
-      RTC_RETURN_IF_ERROR(status);
+      RTC_PARSE_CHECK_OR_RETURN(event.has_rtp_packet());
+      const rtclog::RtpPacket& rtp_packet = event.rtp_packet();
+      RTC_PARSE_CHECK_OR_RETURN(rtp_packet.has_header());
+      RTC_PARSE_CHECK_OR_RETURN(rtp_packet.has_incoming());
+      RTC_PARSE_CHECK_OR_RETURN(rtp_packet.has_packet_length());
+      size_t total_length = rtp_packet.packet_length();
+
+      // Use RtpPacketReceived instead of the more generic RtpPacket because
+      // the former has a built-in conversion to RTPHeader.
+      RtpPacketReceived rtp_header;
+      RTC_PARSE_CHECK_OR_RETURN(rtp_header.Parse(rtp_packet.header()));
+
+      if (const RtpHeaderExtensionMap* extension_map = GetRtpHeaderExtensionMap(
+              rtp_packet.incoming(), rtp_header.Ssrc())) {
+        rtp_header.IdentifyExtensions(*extension_map);
+      }
-      uint32_t ssrc = ByteReader<uint32_t>::ReadBigEndian(header + 8);
-      const RtpHeaderExtensionMap* extension_map =
-          GetRtpHeaderExtensionMap(direction, ssrc);
-      RtpUtility::RtpHeaderParser rtp_parser(header, header_length);
       RTPHeader parsed_header;
-      rtp_parser.Parse(&parsed_header, extension_map, /*header_only*/ true);
+      rtp_header.GetHeader(&parsed_header);
      // Since we give the parser only a header, there is no way for it to know
      // the padding length. The best solution would be to log the padding
      // length in RTC event log. In absence of it, we assume the RTP packet to
      // contain only padding, if the padding bit is set.
      // TODO(webrtc:9730): Use a generic way to obtain padding length.
- if ((header[0] & 0x20) != 0) - parsed_header.paddingLength = total_length - header_length; + if (rtp_header.has_padding()) + parsed_header.paddingLength = total_length - rtp_header.size(); RTC_PARSE_CHECK_OR_RETURN(event.has_timestamp_us()); int64_t timestamp_us = event.timestamp_us(); - if (direction == kIncomingPacket) { + if (rtp_packet.incoming()) { incoming_rtp_packets_map_[parsed_header.ssrc].push_back( LoggedRtpPacketIncoming(Timestamp::Micros(timestamp_us), - parsed_header, header_length, + parsed_header, rtp_header.size(), total_length)); } else { outgoing_rtp_packets_map_[parsed_header.ssrc].push_back( LoggedRtpPacketOutgoing(Timestamp::Micros(timestamp_us), - parsed_header, header_length, + parsed_header, rtp_header.size(), total_length)); } break; @@ -1569,72 +1780,24 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreParsedLegacyEvent( return ParseStatus::Success(); } -// The header must have space for at least IP_PACKET_SIZE bytes. -ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::GetRtpHeader( - const rtclog::Event& event, - PacketDirection* incoming, - uint8_t* header, - size_t* header_length, - size_t* total_length, - int* probe_cluster_id) const { - RTC_PARSE_CHECK_OR_RETURN(event.has_type()); - RTC_PARSE_CHECK_OR_RETURN_EQ(event.type(), rtclog::Event::RTP_EVENT); - RTC_PARSE_CHECK_OR_RETURN(event.has_rtp_packet()); - const rtclog::RtpPacket& rtp_packet = event.rtp_packet(); - // Get direction of packet. - RTC_PARSE_CHECK_OR_RETURN(rtp_packet.has_incoming()); - if (incoming != nullptr) { - *incoming = rtp_packet.incoming() ? kIncomingPacket : kOutgoingPacket; - } - // Get packet length. - RTC_PARSE_CHECK_OR_RETURN(rtp_packet.has_packet_length()); - if (total_length != nullptr) { - *total_length = rtp_packet.packet_length(); - } - // Get header length. - RTC_PARSE_CHECK_OR_RETURN(rtp_packet.has_header()); - if (header_length != nullptr) { - *header_length = rtp_packet.header().size(); - } - if (probe_cluster_id != nullptr) { - if (rtp_packet.has_probe_cluster_id()) { - *probe_cluster_id = rtp_packet.probe_cluster_id(); - RTC_PARSE_CHECK_OR_RETURN_NE(*probe_cluster_id, - PacedPacketInfo::kNotAProbe); - } else { - *probe_cluster_id = PacedPacketInfo::kNotAProbe; - } - } - // Get header contents. - if (header != nullptr) { - const size_t kMinRtpHeaderSize = 12; - RTC_PARSE_CHECK_OR_RETURN_GE(rtp_packet.header().size(), kMinRtpHeaderSize); - RTC_PARSE_CHECK_OR_RETURN_LE(rtp_packet.header().size(), - static_cast(IP_PACKET_SIZE)); - memcpy(header, rtp_packet.header().data(), rtp_packet.header().size()); - } - return ParseStatus::Success(); -} - const RtpHeaderExtensionMap* ParsedRtcEventLog::GetRtpHeaderExtensionMap( - PacketDirection direction, + bool incoming, uint32_t ssrc) { - auto& extensions_maps = direction == PacketDirection::kIncomingPacket - ? incoming_rtp_extensions_maps_ - : outgoing_rtp_extensions_maps_; + auto& extensions_maps = + incoming ? incoming_rtp_extensions_maps_ : outgoing_rtp_extensions_maps_; auto it = extensions_maps.find(ssrc); if (it != extensions_maps.end()) { return &(it->second); } if (parse_unconfigured_header_extensions_ == UnconfiguredHeaderExtensions::kAttemptWebrtcDefaultConfig) { - RTC_LOG(LS_WARNING) << "Using default header extension map for SSRC " - << ssrc; + RTC_DLOG(LS_WARNING) << "Using default header extension map for SSRC " + << ssrc; extensions_maps.insert(std::make_pair(ssrc, default_extension_map_)); return &default_extension_map_; } - RTC_LOG(LS_WARNING) << "Not parsing header extensions for SSRC " << ssrc - << ". 
No header extension map found."; + RTC_DLOG(LS_WARNING) << "Not parsing header extensions for SSRC " << ssrc + << ". No header extension map found."; return nullptr; } @@ -1931,7 +2094,7 @@ ParsedRtcEventLog::GetBweProbeFailure(const rtclog::Event& event) const { } else if (pr_event.result() == rtclog::BweProbeResult::TIMEOUT) { res.failure_reason = ProbeFailureReason::kTimeout; } else { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } RTC_PARSE_CHECK_OR_RETURN(!pr_event.has_bitrate_bps()); @@ -2134,7 +2297,7 @@ std::vector ParsedRtcEventLog::GetPacketInfos( // RTX streams don't have a unique clock offset and frequency, so // the RTP timstamps can't be unwrapped. - // Add an offset to avoid |capture_ticks| to become negative in the case + // Add an offset to avoid `capture_ticks` to become negative in the case // of reordering. constexpr int64_t kStartingCaptureTimeTicks = 90 * 48 * 10000; int64_t capture_ticks = @@ -2205,8 +2368,7 @@ std::vector ParsedRtcEventLog::GetPacketInfos( if (packet.received()) { receive_timestamp += TimeDelta::Micros(packet.delta_us()); if (sent->reported_recv_time.IsInfinite()) { - sent->reported_recv_time = - Timestamp::Millis(receive_timestamp.ms()); + sent->reported_recv_time = receive_timestamp; sent->log_feedback_time = log_feedback_time; } } else { @@ -2400,7 +2562,7 @@ ParsedRtcEventLog::ParseStatus ParsedRtcEventLog::StoreParsedNewFormatEvent( } else if (stream.frame_decoded_events_size() == 1) { return StoreFrameDecodedEvents(stream.frame_decoded_events(0)); } else { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return ParseStatus::Success(); } } @@ -3099,7 +3261,7 @@ ParsedRtcEventLog::StoreAudioNetworkAdaptationEvent( runtime_config.enable_dtx = proto.enable_dtx(); } if (proto.has_num_channels()) { - // Note: Encoding N as N-1 only done for |num_channels_deltas|. + // Note: Encoding N as N-1 only done for `num_channels_deltas`. 
runtime_config.num_channels = proto.num_channels(); } audio_network_adaptation_events_.emplace_back( @@ -3205,12 +3367,12 @@ ParsedRtcEventLog::StoreAudioNetworkAdaptationEvent( runtime_config.frame_length_ms = signed_frame_length_ms; } if (uplink_packet_loss_fraction_values[i].has_value()) { - float uplink_packet_loss_fraction; + float uplink_packet_loss_fraction2; RTC_PARSE_CHECK_OR_RETURN(ParsePacketLossFractionFromProtoFormat( rtc::checked_cast( uplink_packet_loss_fraction_values[i].value()), - &uplink_packet_loss_fraction)); - runtime_config.uplink_packet_loss_fraction = uplink_packet_loss_fraction; + &uplink_packet_loss_fraction2)); + runtime_config.uplink_packet_loss_fraction = uplink_packet_loss_fraction2; } if (enable_fec_values[i].has_value()) { runtime_config.enable_fec = diff --git a/logging/rtc_event_log/rtc_event_log_parser.h b/logging/rtc_event_log/rtc_event_log_parser.h index 4898022fae..4a9ff3495f 100644 --- a/logging/rtc_event_log/rtc_event_log_parser.h +++ b/logging/rtc_event_log/rtc_event_log_parser.h @@ -15,22 +15,24 @@ #include #include #include -#include // pair #include #include "absl/base/attributes.h" #include "api/rtc_event_log/rtc_event_log.h" #include "call/video_receive_stream.h" #include "call/video_send_stream.h" +#include "logging/rtc_event_log/events/logged_rtp_rtcp.h" #include "logging/rtc_event_log/events/rtc_event_alr_state.h" #include "logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h" #include "logging/rtc_event_log/events/rtc_event_audio_playout.h" #include "logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h" #include "logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h" +#include "logging/rtc_event_log/events/rtc_event_begin_log.h" #include "logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h" #include "logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h" #include "logging/rtc_event_log/events/rtc_event_dtls_transport_state.h" #include "logging/rtc_event_log/events/rtc_event_dtls_writable_state.h" +#include "logging/rtc_event_log/events/rtc_event_end_log.h" #include "logging/rtc_event_log/events/rtc_event_frame_decoded.h" #include "logging/rtc_event_log/events/rtc_event_generic_ack_received.h" #include "logging/rtc_event_log/events/rtc_event_generic_packet_received.h" @@ -42,9 +44,12 @@ #include "logging/rtc_event_log/events/rtc_event_probe_result_success.h" #include "logging/rtc_event_log/events/rtc_event_remote_estimate.h" #include "logging/rtc_event_log/events/rtc_event_route_change.h" +#include "logging/rtc_event_log/events/rtc_event_rtcp_packet_incoming.h" +#include "logging/rtc_event_log/events/rtc_event_rtcp_packet_outgoing.h" +#include "logging/rtc_event_log/events/rtc_event_rtp_packet_incoming.h" +#include "logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h" #include "logging/rtc_event_log/events/rtc_event_video_receive_stream_config.h" #include "logging/rtc_event_log/events/rtc_event_video_send_stream_config.h" -#include "logging/rtc_event_log/logged_events.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/source/rtcp_packet/common_header.h" #include "rtc_base/ignore_wundef.h" @@ -64,6 +69,80 @@ namespace webrtc { enum PacketDirection { kIncomingPacket = 0, kOutgoingPacket }; +enum class LoggedMediaType : uint8_t { kUnknown, kAudio, kVideo }; + +struct LoggedPacketInfo { + LoggedPacketInfo(const LoggedRtpPacket& rtp, + LoggedMediaType media_type, + bool rtx, + Timestamp capture_time); + LoggedPacketInfo(const 
LoggedPacketInfo&);
+  ~LoggedPacketInfo();
+  int64_t log_time_ms() const { return log_packet_time.ms(); }
+  int64_t log_time_us() const { return log_packet_time.us(); }
+  uint32_t ssrc;
+  uint16_t stream_seq_no;
+  uint16_t size;
+  uint16_t payload_size;
+  uint16_t padding_size;
+  uint16_t overhead = 0;
+  uint8_t payload_type;
+  LoggedMediaType media_type = LoggedMediaType::kUnknown;
+  bool rtx = false;
+  bool marker_bit = false;
+  bool has_transport_seq_no = false;
+  bool last_in_feedback = false;
+  uint16_t transport_seq_no = 0;
+  // The RTP header timestamp, unwrapped and converted from a tick count to a
+  // seconds-based timestamp.
+  Timestamp capture_time;
+  // The time the packet was logged. This is the receive time for incoming
+  // packets and send time for outgoing.
+  Timestamp log_packet_time;
+  // Send time as reported by the abs-send-time extension. For outgoing packets
+  // this corresponds to log_packet_time, but might be measured using another
+  // clock.
+  Timestamp reported_send_time;
+  // The receive time that was reported in feedback. For incoming packets this
+  // corresponds to log_packet_time, but might be measured using another clock.
+  // PlusInfinity indicates that the packet was lost.
+  Timestamp reported_recv_time = Timestamp::MinusInfinity();
+  // The time the feedback message was logged. This is the feedback send time
+  // for incoming packets and the feedback receive time for outgoing.
+  // PlusInfinity indicates that feedback was expected but not received.
+  Timestamp log_feedback_time = Timestamp::MinusInfinity();
+  // The delay between receiving an RTP packet and sending feedback, for
+  // incoming packets. For outgoing packets we don't know the feedback send
+  // time, and this is instead calculated as the difference in reported receive
+  // time between this packet and the last packet in the same feedback message.
+  TimeDelta feedback_hold_duration = TimeDelta::MinusInfinity();
+};
+
+struct InferredRouteChangeEvent {
+  int64_t log_time_ms() const { return log_time.ms(); }
+  int64_t log_time_us() const { return log_time.us(); }
+  uint32_t route_id;
+  Timestamp log_time = Timestamp::MinusInfinity();
+  uint16_t send_overhead;
+  uint16_t return_overhead;
+};
+
+enum class LoggedIceEventType {
+  kAdded,
+  kUpdated,
+  kDestroyed,
+  kSelected,
+  kCheckSent,
+  kCheckReceived,
+  kCheckResponseSent,
+  kCheckResponseReceived,
+};
+
+struct LoggedIceEvent {
+  uint32_t candidate_pair_id;
+  Timestamp log_time;
+  LoggedIceEventType event_type;
+};
+
 // This class is used to process lists of LoggedRtpPacketIncoming
 // and LoggedRtpPacketOutgoing without duplicating the code.
 // TODO(terelius): Remove this class. Instead use e.g.
a vector of pointers @@ -240,48 +319,11 @@ class ParsedRtcEventLog { kDontParse, kAttemptWebrtcDefaultConfig }; - class ParseStatus { - public: - static ParseStatus Success() { return ParseStatus(); } - static ParseStatus Error(std::string error, std::string file, int line) { - return ParseStatus(error, file, line); - } - bool ok() const { return error_.empty() && file_.empty() && line_ == 0; } - std::string message() const { - return error_ + " failed at " + file_ + " line " + std::to_string(line_); - } - - ABSL_DEPRECATED("Use ok() instead") operator bool() const { return ok(); } - - private: - ParseStatus() : error_(), file_(), line_(0) {} - ParseStatus(std::string error, std::string file, int line) - : error_(error), file_(file), line_(line) {} - std::string error_; - std::string file_; - int line_; - }; + using ParseStatus = RtcEventLogParseStatus; template - class ParseStatusOr { - public: - ParseStatusOr(const ParseStatus& error) // NOLINT - : status_(error), value_() {} - ParseStatusOr(const T& value) // NOLINT - : status_(ParseStatus::Success()), value_(value) {} - bool ok() const { return status_.ok(); } - const T& value() const& { - RTC_DCHECK(status_.ok()); - return value_; - } - std::string message() const { return status_.message(); } - const ParseStatus& status() const { return status_; } - - private: - ParseStatus status_; - T value_; - }; + using ParseStatusOr = RtcEventLogParseStatusOr; struct LoggedRtpStreamIncoming { LoggedRtpStreamIncoming(); @@ -620,6 +662,7 @@ class ParsedRtcEventLog { private: ABSL_MUST_USE_RESULT ParseStatus ParseStreamInternal(absl::string_view s); + ABSL_MUST_USE_RESULT ParseStatus ParseStreamInternalV3(absl::string_view s); ABSL_MUST_USE_RESULT ParseStatus StoreParsedLegacyEvent(const rtclog::Event& event); @@ -627,27 +670,14 @@ class ParsedRtcEventLog { template void StoreFirstAndLastTimestamp(const std::vector& v); - // Reads the header, direction, header length and packet length from the RTP - // event at |index|, and stores the values in the corresponding output - // parameters. Each output parameter can be set to nullptr if that value - // isn't needed. - // NB: The header must have space for at least IP_PACKET_SIZE bytes. - ParseStatus GetRtpHeader(const rtclog::Event& event, - PacketDirection* incoming, - uint8_t* header, - size_t* header_length, - size_t* total_length, - int* probe_cluster_id) const; - // Returns: a pointer to a header extensions map acquired from parsing // corresponding Audio/Video Sender/Receiver config events. // Warning: if the same SSRC is reused by both video and audio streams during // call, extensions maps may be incorrect (the last one would be returned). - const RtpHeaderExtensionMap* GetRtpHeaderExtensionMap( - PacketDirection direction, - uint32_t ssrc); + const RtpHeaderExtensionMap* GetRtpHeaderExtensionMap(bool incoming, + uint32_t ssrc); - // Reads packet, direction and packet length from the RTCP event at |index|, + // Reads packet, direction and packet length from the RTCP event at `index`, // and stores the values in the corresponding output parameters. // Each output parameter can be set to nullptr if that value isn't needed. // NB: The packet must have space for at least IP_PACKET_SIZE bytes. 
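Editor's note on the parser header changes above: the nested ParseStatus and ParseStatusOr classes are replaced by aliases of RtcEventLogParseStatus, so call sites keep the same ok()/message() surface. A brief usage sketch, illustrative only; ParseString is the entry point the updated unit tests below rely on:

    #include <iostream>
    #include <string>

    #include "logging/rtc_event_log/rtc_event_log_parser.h"

    // Parses serialized event-log contents and reports failures through the
    // same ok()/message() interface the old nested ParseStatus offered.
    bool LoadLog(const std::string& contents) {
      webrtc::ParsedRtcEventLog parsed_log;
      webrtc::ParsedRtcEventLog::ParseStatus status =
          parsed_log.ParseString(contents);
      if (!status.ok()) {
        // message() includes the failed check plus the file and line.
        std::cerr << "Parse failed: " << status.message() << "\n";
        return false;
      }
      return true;
    }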
diff --git a/logging/rtc_event_log/rtc_event_log_unittest.cc b/logging/rtc_event_log/rtc_event_log_unittest.cc index 323e4fe009..8f32eecd8b 100644 --- a/logging/rtc_event_log/rtc_event_log_unittest.cc +++ b/logging/rtc_event_log/rtc_event_log_unittest.cc @@ -8,6 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. */ +#include "api/rtc_event_log/rtc_event_log.h" + #include #include #include @@ -17,9 +19,7 @@ #include #include -#include "api/rtc_event_log/rtc_event_log.h" #include "api/rtc_event_log/rtc_event_log_factory.h" -#include "api/rtc_event_log_output_file.h" #include "api/task_queue/default_task_queue_factory.h" #include "logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h" #include "logging/rtc_event_log/events/rtc_event_audio_playout.h" @@ -50,6 +50,7 @@ #include "rtc_base/fake_clock.h" #include "rtc_base/random.h" #include "test/gtest.h" +#include "test/logging/memory_log_writer.h" #include "test/testsupport/file_utils.h" namespace webrtc { @@ -113,11 +114,12 @@ class RtcEventLogSession output_period_ms_(std::get<1>(GetParam())), encoding_type_(std::get<2>(GetParam())), gen_(seed_ * 880001UL), - verifier_(encoding_type_) { + verifier_(encoding_type_), + log_storage_(), + log_output_factory_(log_storage_.CreateFactory()) { clock_.SetTime(Timestamp::Micros(prng_.Rand())); // Find the name of the current test, in order to use it as a temporary // filename. - // TODO(terelius): Use a general utility function to generate a temp file. auto test_info = ::testing::UnitTest::GetInstance()->current_test_info(); std::string test_name = std::string(test_info->test_case_name()) + "_" + test_info->name(); @@ -125,7 +127,7 @@ class RtcEventLogSession temp_filename_ = test::OutputPath() + test_name; } - // Create and buffer the config events and |num_events_before_log_start| + // Create and buffer the config events and `num_events_before_log_start` // randomized non-config events. Then call StartLogging and finally create and // write the remaining non-config events. void WriteLog(EventCounts count, size_t num_events_before_log_start); @@ -203,6 +205,8 @@ class RtcEventLogSession test::EventVerifier verifier_; rtc::ScopedFakeClock clock_; std::string temp_filename_; + MemoryLogStorage log_storage_; + std::unique_ptr log_output_factory_; }; bool SsrcUsed( @@ -272,9 +276,9 @@ void RtcEventLogSession::WriteVideoRecvConfigs(size_t video_recv_streams, } while (SsrcUsed(ssrc, incoming_extensions_)); RtpHeaderExtensionMap extensions = gen_.NewRtpHeaderExtensionMap(); incoming_extensions_.emplace_back(ssrc, extensions); - auto event = gen_.NewVideoReceiveStreamConfig(ssrc, extensions); - event_log->Log(event->Copy()); - video_recv_config_list_.push_back(std::move(event)); + auto new_event = gen_.NewVideoReceiveStreamConfig(ssrc, extensions); + event_log->Log(new_event->Copy()); + video_recv_config_list_.push_back(std::move(new_event)); } } @@ -314,7 +318,7 @@ void RtcEventLogSession::WriteLog(EventCounts count, auto task_queue_factory = CreateDefaultTaskQueueFactory(); RtcEventLogFactory rtc_event_log_factory(task_queue_factory.get()); - // The log file will be flushed to disk when the event_log goes out of scope. + // The log will be flushed to output when the event_log goes out of scope. 
std::unique_ptr event_log = rtc_event_log_factory.CreateRtcEventLog(encoding_type_); @@ -333,9 +337,8 @@ void RtcEventLogSession::WriteLog(EventCounts count, for (; remaining_events > 0; remaining_events--) { if (remaining_events == remaining_events_at_start) { clock_.AdvanceTime(TimeDelta::Millis(prng_.Rand(20))); - event_log->StartLogging( - std::make_unique(temp_filename_, 10000000), - output_period_ms_); + event_log->StartLogging(log_output_factory_->Create(temp_filename_), + output_period_ms_); start_time_us_ = rtc::TimeMicros(); utc_start_time_us_ = rtc::TimeUTCMicros(); } @@ -546,7 +549,7 @@ void RtcEventLogSession::WriteLog(EventCounts count, } selection -= count.generic_acks_received; - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } event_log->StopLogging(); @@ -555,12 +558,14 @@ void RtcEventLogSession::WriteLog(EventCounts count, ASSERT_EQ(count.total_nonconfig_events(), static_cast(0)); } -// Read the file and verify that what we read back from the event log is the +// Read the log and verify that what we read back from the event log is the // same as what we wrote down. void RtcEventLogSession::ReadAndVerifyLog() { - // Read the generated file from disk. + // Read the generated log from memory. ParsedRtcEventLog parsed_log; - ASSERT_TRUE(parsed_log.ParseFile(temp_filename_).ok()); + auto it = log_storage_.logs().find(temp_filename_); + ASSERT_TRUE(it != log_storage_.logs().end()); + ASSERT_TRUE(parsed_log.ParseString(it->second).ok()); // Start and stop events. auto& parsed_start_log_events = parsed_log.start_log_events(); @@ -787,9 +792,6 @@ void RtcEventLogSession::ReadAndVerifyLog() { std::min(start_time_us_ / 1000, first_timestamp_ms_)); EXPECT_EQ(parsed_log.first_log_segment().stop_time_ms(), stop_time_us_ / 1000); - - // Clean up temporary file - can be pretty slow. - remove(temp_filename_.c_str()); } } // namespace @@ -875,9 +877,14 @@ class RtcEventLogCircularBufferTest : public ::testing::TestWithParam { public: RtcEventLogCircularBufferTest() - : encoding_type_(GetParam()), verifier_(encoding_type_) {} + : encoding_type_(GetParam()), + verifier_(encoding_type_), + log_storage_(), + log_output_factory_(log_storage_.CreateFactory()) {} const RtcEventLog::EncodingType encoding_type_; const test::EventVerifier verifier_; + MemoryLogStorage log_storage_; + std::unique_ptr log_output_factory_; }; TEST_P(RtcEventLogCircularBufferTest, KeepsMostRecentEvents) { @@ -899,8 +906,8 @@ TEST_P(RtcEventLogCircularBufferTest, KeepsMostRecentEvents) { auto task_queue_factory = CreateDefaultTaskQueueFactory(); RtcEventLogFactory rtc_event_log_factory(task_queue_factory.get()); - // When `log` goes out of scope, it causes the log file to be flushed - // to disk. + // When `log` goes out of scope, the contents are flushed + // to the output. std::unique_ptr log = rtc_event_log_factory.CreateRtcEventLog(encoding_type_); @@ -917,16 +924,17 @@ TEST_P(RtcEventLogCircularBufferTest, KeepsMostRecentEvents) { } int64_t start_time_us = rtc::TimeMicros(); int64_t utc_start_time_us = rtc::TimeUTCMicros(); - log->StartLogging( - std::make_unique(temp_filename, 10000000), - RtcEventLog::kImmediateOutput); + log->StartLogging(log_output_factory_->Create(temp_filename), + RtcEventLog::kImmediateOutput); fake_clock->AdvanceTime(TimeDelta::Millis(10)); int64_t stop_time_us = rtc::TimeMicros(); log->StopLogging(); - // Read the generated file from disk. + // Read the generated log from memory. 
ParsedRtcEventLog parsed_log; - ASSERT_TRUE(parsed_log.ParseFile(temp_filename).ok()); + auto it = log_storage_.logs().find(temp_filename); + ASSERT_TRUE(it != log_storage_.logs().end()); + ASSERT_TRUE(parsed_log.ParseString(it->second).ok()); const auto& start_log_events = parsed_log.start_log_events(); ASSERT_EQ(start_log_events.size(), 1u); @@ -960,9 +968,6 @@ TEST_P(RtcEventLogCircularBufferTest, KeepsMostRecentEvents) { RtcEventProbeResultSuccess(first_id + i, first_bitrate_bps + i * 1000), probe_success_events[i]); } - - // Clean up temporary file - can be pretty slow. - remove(temp_filename.c_str()); } INSTANTIATE_TEST_SUITE_P( diff --git a/logging/rtc_event_log/rtc_event_log_unittest_helper.cc b/logging/rtc_event_log/rtc_event_log_unittest_helper.cc index 0960c98502..e77a67182b 100644 --- a/logging/rtc_event_log/rtc_event_log_unittest_helper.cc +++ b/logging/rtc_event_log/rtc_event_log_unittest_helper.cc @@ -462,7 +462,7 @@ EventGenerator::NewRtcpPacketIncoming() { return std::make_unique(buffer); } default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); rtc::Buffer buffer; return std::make_unique(buffer); } @@ -531,7 +531,7 @@ EventGenerator::NewRtcpPacketOutgoing() { return std::make_unique(buffer); } default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); rtc::Buffer buffer; return std::make_unique(buffer); } diff --git a/logging/rtc_event_log/rtc_event_log_unittest_helper.h b/logging/rtc_event_log/rtc_event_log_unittest_helper.h index eb16592271..94a46c195c 100644 --- a/logging/rtc_event_log/rtc_event_log_unittest_helper.h +++ b/logging/rtc_event_log/rtc_event_log_unittest_helper.h @@ -98,7 +98,7 @@ class EventGenerator { rtcp::TransportFeedback NewTransportFeedback(); rtcp::LossNotification NewLossNotification(); - // |all_configured_exts| determines whether the RTP packet exhibits all + // `all_configured_exts` determines whether the RTP packet exhibits all // configured extensions, or a random subset thereof. void RandomizeRtpPacket(size_t payload_size, size_t padding_size, @@ -107,21 +107,21 @@ class EventGenerator { RtpPacket* rtp_packet, bool all_configured_exts); - // |all_configured_exts| determines whether the RTP packet exhibits all + // `all_configured_exts` determines whether the RTP packet exhibits all // configured extensions, or a random subset thereof. std::unique_ptr NewRtpPacketIncoming( uint32_t ssrc, const RtpHeaderExtensionMap& extension_map, bool all_configured_exts = true); - // |all_configured_exts| determines whether the RTP packet exhibits all + // `all_configured_exts` determines whether the RTP packet exhibits all // configured extensions, or a random subset thereof. std::unique_ptr NewRtpPacketOutgoing( uint32_t ssrc, const RtpHeaderExtensionMap& extension_map, bool all_configured_exts = true); - // |configure_all| determines whether all supported extensions are configured, + // `configure_all` determines whether all supported extensions are configured, // or a random subset. 
RtpHeaderExtensionMap NewRtpHeaderExtensionMap(bool configure_all = false); diff --git a/logging/rtc_event_log/rtc_event_processor.cc b/logging/rtc_event_log/rtc_event_processor.cc index 804e283851..e6a9983b6f 100644 --- a/logging/rtc_event_log/rtc_event_processor.cc +++ b/logging/rtc_event_log/rtc_event_processor.cc @@ -15,7 +15,7 @@ RtcEventProcessor::RtcEventProcessor() = default; RtcEventProcessor::~RtcEventProcessor() = default; void RtcEventProcessor::ProcessEventsInOrder() { - // |event_lists_| is a min-heap of lists ordered by the timestamp of the + // `event_lists_` is a min-heap of lists ordered by the timestamp of the // first element in the list. We therefore process the first element of the // first list, then reinsert the remainder of that list into the heap // if the list still contains unprocessed elements. diff --git a/logging/rtc_event_log/rtc_event_processor.h b/logging/rtc_event_log/rtc_event_processor.h index abb73701db..9bf4c9c5db 100644 --- a/logging/rtc_event_log/rtc_event_processor.h +++ b/logging/rtc_event_log/rtc_event_processor.h @@ -98,7 +98,7 @@ class RtcEventProcessor { // The elements of each list is processed in the index order. To process all // elements in all lists in timestamp order, each list needs to be sorted in // timestamp order prior to insertion. - // N.B. |iterable| is not owned by RtcEventProcessor. The caller must ensure + // N.B. `iterable` is not owned by RtcEventProcessor. The caller must ensure // that the iterable outlives RtcEventProcessor and it must not be modified // until processing has finished. template @@ -122,7 +122,7 @@ class RtcEventProcessor { std::unique_ptr; int insertion_order_index_ = 0; std::vector event_lists_; - // Comparison function to make |event_lists_| into a min heap. + // Comparison function to make `event_lists_` into a min heap. static bool Cmp(const ListPtrType& a, const ListPtrType& b); }; diff --git a/media/BUILD.gn b/media/BUILD.gn index 5f0f527b8f..493b883949 100644 --- a/media/BUILD.gn +++ b/media/BUILD.gn @@ -23,30 +23,11 @@ config("rtc_media_defines_config") { defines = [ "HAVE_WEBRTC_VIDEO" ] } -# Remove once downstream projects stop depend on this. -rtc_source_set("rtc_h264_profile_id") { - visibility = [ "*" ] - sources = [ - "base/h264_profile_level_id.cc", - "base/h264_profile_level_id.h", - ] - public_deps = # no-presubmit-check TODO(webrtc:8603) - [ "../api/video_codecs:video_codecs_api" ] -} - rtc_source_set("rtc_media_config") { visibility = [ "*" ] sources = [ "base/media_config.h" ] } -# Remove once downstream projects stop depend on this. 
-rtc_source_set("rtc_vp9_profile") { - visibility = [ "*" ] - sources = [ "base/vp9_profile.h" ] - public_deps = # no-presubmit-check TODO(webrtc:8603) - [ "../api/video_codecs:video_codecs_api" ] -} - rtc_library("rtc_sdp_video_format_utils") { visibility = [ "*" ] sources = [ @@ -85,6 +66,7 @@ rtc_library("rtc_media_base") { "../api/transport:stun_types", "../api/transport:webrtc_key_value_config", "../api/transport/rtp:rtp_source", + "../api/units:time_delta", "../api/video:video_bitrate_allocation", "../api/video:video_bitrate_allocator_factory", "../api/video:video_frame", @@ -169,6 +151,7 @@ rtc_library("rtc_simulcast_encoder_adapter") { "../api/video_codecs:rtc_software_fallback_wrappers", "../api/video_codecs:video_codecs_api", "../call:video_stream_api", + "../common_video", "../modules/video_coding:video_codec_interface", "../modules/video_coding:video_coding_utility", "../rtc_base:checks", @@ -228,13 +211,20 @@ rtc_library("rtc_internal_video_codecs") { "../modules/video_coding:webrtc_vp8", "../modules/video_coding:webrtc_vp9", "../modules/video_coding/codecs/av1:libaom_av1_decoder", - "../modules/video_coding/codecs/av1:libaom_av1_encoder", + "../modules/video_coding/codecs/av1:libaom_av1_encoder_if_supported", "../rtc_base:checks", "../rtc_base:rtc_base_approved", "../rtc_base/system:rtc_export", + "../system_wrappers:field_trial", "../test:fake_video_codecs", ] - absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] + if (rtc_include_dav1d_in_internal_decoder_factory) { + deps += [ "../modules/video_coding/codecs/av1:dav1d_decoder" ] + } + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] sources = [ "engine/fake_video_codec_factory.cc", "engine/fake_video_codec_factory.h", @@ -292,6 +282,7 @@ rtc_library("rtc_audio_video") { "../modules/audio_processing:api", "../modules/audio_processing/aec_dump", "../modules/audio_processing/agc:gain_control_interface", + "../modules/rtp_rtcp:rtp_rtcp_format", "../modules/video_coding", "../modules/video_coding:video_codec_interface", "../modules/video_coding:video_coding_utility", @@ -401,6 +392,7 @@ if (rtc_build_dcsctp) { deps = [ ":rtc_data_sctp_transport_internal", "../api:array_view", + "../api/task_queue:task_queue", "../media:rtc_media_base", "../net/dcsctp/public:factory", "../net/dcsctp/public:socket", @@ -410,6 +402,7 @@ if (rtc_build_dcsctp) { "../p2p:rtc_p2p", "../rtc_base:checks", "../rtc_base:rtc_base_approved", + "../rtc_base:socket", "../rtc_base:threading", "../rtc_base/task_utils:pending_task_safety_flag", "../rtc_base/task_utils:to_queued_task", @@ -607,6 +600,7 @@ if (rtc_include_tests) { "../api/test/video:function_video_factory", "../api/transport:field_trial_based_config", "../api/units:time_delta", + "../api/units:timestamp", "../api/video:builtin_video_bitrate_allocator_factory", "../api/video:video_bitrate_allocation", "../api/video:video_codec_constants", @@ -629,6 +623,7 @@ if (rtc_include_tests) { "../modules/video_coding:webrtc_h264", "../modules/video_coding:webrtc_vp8", "../modules/video_coding/codecs/av1:libaom_av1_decoder", + "../modules/video_coding/codecs/av1:libaom_av1_encoder_if_supported", "../p2p:p2p_test_utils", "../rtc_base", "../rtc_base:checks", @@ -649,6 +644,7 @@ if (rtc_include_tests) { "../test:test_main", "../test:test_support", "../test:video_test_common", + "../test/time_controller", ] absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", @@ -668,6 +664,7 @@ if (rtc_include_tests) { 
"base/video_common_unittest.cc", "engine/encoder_simulcast_proxy_unittest.cc", "engine/internal_decoder_factory_unittest.cc", + "engine/internal_encoder_factory_unittest.cc", "engine/multiplex_codec_factory_unittest.cc", "engine/null_webrtc_video_engine_unittest.cc", "engine/payload_type_mapper_unittest.cc", diff --git a/media/base/adapted_video_track_source.cc b/media/base/adapted_video_track_source.cc index 2fce973f68..f8f8f2dad1 100644 --- a/media/base/adapted_video_track_source.cc +++ b/media/base/adapted_video_track_source.cc @@ -114,4 +114,9 @@ bool AdaptedVideoTrackSource::AdaptFrame(int width, return true; } +void AdaptedVideoTrackSource::ProcessConstraints( + const webrtc::VideoTrackSourceConstraints& constraints) { + broadcaster_.ProcessConstraints(constraints); +} + } // namespace rtc diff --git a/media/base/adapted_video_track_source.h b/media/base/adapted_video_track_source.h index 59ae036ff6..1386fbd9db 100644 --- a/media/base/adapted_video_track_source.h +++ b/media/base/adapted_video_track_source.h @@ -38,7 +38,7 @@ class RTC_EXPORT AdaptedVideoTrackSource ~AdaptedVideoTrackSource() override; protected: - // Allows derived classes to initialize |video_adapter_| with a custom + // Allows derived classes to initialize `video_adapter_` with a custom // alignment. explicit AdaptedVideoTrackSource(int required_alignment); // Checks the apply_rotation() flag. If the frame needs rotation, and it is a @@ -86,6 +86,8 @@ class RTC_EXPORT AdaptedVideoTrackSource rtc::VideoSinkInterface* sink) override {} void RemoveEncodedSink( rtc::VideoSinkInterface* sink) override {} + void ProcessConstraints( + const webrtc::VideoTrackSourceConstraints& constraints) override; cricket::VideoAdapter video_adapter_; diff --git a/media/base/codec.cc b/media/base/codec.cc index cb6913e76a..9b09f5e73b 100644 --- a/media/base/codec.cc +++ b/media/base/codec.cc @@ -81,7 +81,7 @@ void FeedbackParams::Add(const FeedbackParam& param) { return; } if (Has(param)) { - // Param already in |this|. + // Param already in `this`. return; } params_.push_back(param); @@ -412,27 +412,16 @@ bool HasTransportCc(const Codec& codec) { const VideoCodec* FindMatchingCodec( const std::vector& supported_codecs, const VideoCodec& codec) { + webrtc::SdpVideoFormat sdp_video_format{codec.name, codec.params}; for (const VideoCodec& supported_codec : supported_codecs) { - if (IsSameCodec(codec.name, codec.params, supported_codec.name, - supported_codec.params)) { + if (sdp_video_format.IsSameCodec( + {supported_codec.name, supported_codec.params})) { return &supported_codec; } } return nullptr; } -// TODO(crbug.com/1187565): Remove once downstream projects stopped using this -// method in favor of SdpVideoFormat::IsSameCodec(). -bool IsSameCodec(const std::string& name1, - const CodecParameterMap& params1, - const std::string& name2, - const CodecParameterMap& params2) { - // Two codecs are considered the same if the name matches (case insensitive) - // and certain codec-specific parameters match. - return absl::EqualsIgnoreCase(name1, name2) && - IsSameCodecSpecific(name1, params1, name2, params2); -} - // If a decoder supports any H264 profile, it is implicitly assumed to also // support constrained base line even though it's not explicitly listed. 
void AddH264ConstrainedBaselineProfileToSupportedFormats( diff --git a/media/base/codec.h b/media/base/codec.h index c7c99bf732..cfc31aed1f 100644 --- a/media/base/codec.h +++ b/media/base/codec.h @@ -78,7 +78,7 @@ struct RTC_EXPORT Codec { bool Matches(const Codec& codec) const; bool MatchesCapability(const webrtc::RtpCodecCapability& capability) const; - // Find the parameter for |name| and write the value to |out|. + // Find the parameter for `name` and write the value to `out`. bool GetParam(const std::string& name, std::string* out) const; bool GetParam(const std::string& name, int* out) const; @@ -92,8 +92,8 @@ struct RTC_EXPORT Codec { bool HasFeedbackParam(const FeedbackParam& param) const; void AddFeedbackParam(const FeedbackParam& param); - // Filter |this| feedbacks params such that only those shared by both |this| - // and |other| are kept. + // Filter `this` feedbacks params such that only those shared by both `this` + // and `other` are kept. void IntersectFeedbackParams(const Codec& other); virtual webrtc::RtpCodecParameters ToCodecParameters() const; @@ -176,7 +176,7 @@ struct RTC_EXPORT VideoCodec : public Codec { bool operator!=(const VideoCodec& c) const { return !(*this == c); } - // Return packetization which both |local_codec| and |remote_codec| support. + // Return packetization which both `local_codec` and `remote_codec` support. static absl::optional IntersectPacketization( const VideoCodec& local_codec, const VideoCodec& remote_codec); @@ -202,7 +202,7 @@ struct RTC_EXPORT VideoCodec : public Codec { void SetDefaultParameters(); }; -// Get the codec setting associated with |payload_type|. If there +// Get the codec setting associated with `payload_type`. If there // is no codec associated with that payload type it returns nullptr. template const Codec* FindCodecById(const std::vector& codecs, int payload_type) { @@ -218,15 +218,11 @@ bool HasNack(const Codec& codec); bool HasRemb(const Codec& codec); bool HasRrtr(const Codec& codec); bool HasTransportCc(const Codec& codec); -// Returns the first codec in |supported_codecs| that matches |codec|, or +// Returns the first codec in `supported_codecs` that matches `codec`, or // nullptr if no codec matches. const VideoCodec* FindMatchingCodec( const std::vector& supported_codecs, const VideoCodec& codec); -RTC_EXPORT bool IsSameCodec(const std::string& name1, - const CodecParameterMap& params1, - const std::string& name2, - const CodecParameterMap& params2); RTC_EXPORT void AddH264ConstrainedBaselineProfileToSupportedFormats( std::vector* supported_formats); diff --git a/media/base/codec_unittest.cc b/media/base/codec_unittest.cc index 23bae7b7fe..c5aa5bf186 100644 --- a/media/base/codec_unittest.cc +++ b/media/base/codec_unittest.cc @@ -496,94 +496,3 @@ TEST(CodecTest, H264CostrainedBaselineNotAddedIfAlreadySpecified) { EXPECT_EQ(supported_formats[3], kExplicitlySupportedFormats[3]); EXPECT_EQ(supported_formats.size(), kExplicitlySupportedFormats.size()); } - -// Tests that the helper IsSameCodec returns the correct value for codecs that -// must also be matched on particular parameter values. 
-using IsSameCodecParamsTestCase =
-    std::tuple<cricket::CodecParameterMap, cricket::CodecParameterMap>;
-class IsSameCodecParamsTest
-    : public ::testing::TestWithParam<
-          std::tuple<std::string, bool, IsSameCodecParamsTestCase>> {
- protected:
-  IsSameCodecParamsTest() {
-    name_ = std::get<0>(GetParam());
-    expected_ = std::get<1>(GetParam());
-    const auto& test_case = std::get<2>(GetParam());
-    params_left_ = std::get<0>(test_case);
-    params_right_ = std::get<1>(test_case);
-  }
-
-  std::string name_;
-  bool expected_;
-  cricket::CodecParameterMap params_left_;
-  cricket::CodecParameterMap params_right_;
-};
-
-TEST_P(IsSameCodecParamsTest, Expected) {
-  EXPECT_EQ(expected_,
-            cricket::IsSameCodec(name_, params_left_, name_, params_right_));
-}
-
-TEST_P(IsSameCodecParamsTest, Commutative) {
-  EXPECT_EQ(expected_,
-            cricket::IsSameCodec(name_, params_right_, name_, params_left_));
-}
-
-IsSameCodecParamsTestCase MakeTestCase(cricket::CodecParameterMap left,
-                                       cricket::CodecParameterMap right) {
-  return std::make_tuple(left, right);
-}
-
-const IsSameCodecParamsTestCase kH264ParamsSameTestCases[] = {
-    // Both have the same defaults.
-    MakeTestCase({}, {}),
-    // packetization-mode: 0 is the default.
-    MakeTestCase({{cricket::kH264FmtpPacketizationMode, "0"}}, {}),
-    // Non-default packetization-mode matches.
-    MakeTestCase({{cricket::kH264FmtpPacketizationMode, "1"}},
-                 {{cricket::kH264FmtpPacketizationMode, "1"}}),
-};
-INSTANTIATE_TEST_SUITE_P(
-    H264_Same,
-    IsSameCodecParamsTest,
-    ::testing::Combine(::testing::Values("H264"),
-                       ::testing::Values(true),
-                       ::testing::ValuesIn(kH264ParamsSameTestCases)));
-
-const IsSameCodecParamsTestCase kH264ParamsNotSameTestCases[] = {
-    // packetization-mode does not match the default of "0".
-    MakeTestCase({{cricket::kH264FmtpPacketizationMode, "1"}}, {}),
-};
-INSTANTIATE_TEST_SUITE_P(
-    H264_NotSame,
-    IsSameCodecParamsTest,
-    ::testing::Combine(::testing::Values("H264"),
-                       ::testing::Values(false),
-                       ::testing::ValuesIn(kH264ParamsNotSameTestCases)));
-
-const IsSameCodecParamsTestCase kVP9ParamsSameTestCases[] = {
-    // Both have the same defaults.
-    MakeTestCase({}, {}),
-    // profile-id: 0 is the default.
-    MakeTestCase({{webrtc::kVP9FmtpProfileId, "0"}}, {}),
-    // Non-default profile-id matches.
-    MakeTestCase({{webrtc::kVP9FmtpProfileId, "2"}},
-                 {{webrtc::kVP9FmtpProfileId, "2"}}),
-};
-INSTANTIATE_TEST_SUITE_P(
-    VP9_Same,
-    IsSameCodecParamsTest,
-    ::testing::Combine(::testing::Values("VP9"),
-                       ::testing::Values(true),
-                       ::testing::ValuesIn(kVP9ParamsSameTestCases)));
-
-const IsSameCodecParamsTestCase kVP9ParamsNotSameTestCases[] = {
-    // profile-id missing from right.
-    MakeTestCase({{webrtc::kVP9FmtpProfileId, "2"}}, {}),
-};
-INSTANTIATE_TEST_SUITE_P(
-    VP9_NotSame,
-    IsSameCodecParamsTest,
-    ::testing::Combine(::testing::Values("VP9"),
-                       ::testing::Values(false),
-                       ::testing::ValuesIn(kVP9ParamsNotSameTestCases)));
diff --git a/media/base/delayable.h b/media/base/delayable.h
index 90ce5d7089..f0344c5dec 100644
--- a/media/base/delayable.h
+++ b/media/base/delayable.h
@@ -18,7 +18,7 @@
 namespace cricket {

 // Delayable is used by user code through ApplyConstraints algorithm. Its
-// methods must take precendence over similar functional in |syncable.h|.
+// methods must take precedence over similar functionality in `syncable.h`.
class Delayable { public: virtual ~Delayable() {} diff --git a/media/base/fake_frame_source.cc b/media/base/fake_frame_source.cc index 8a05536c83..61bc5857d9 100644 --- a/media/base/fake_frame_source.cc +++ b/media/base/fake_frame_source.cc @@ -52,7 +52,8 @@ webrtc::VideoFrame FakeFrameSource::GetFrameRotationApplied() { case webrtc::kVideoRotation_270: return GetFrame(height_, width_, webrtc::kVideoRotation_0, interval_us_); } - RTC_NOTREACHED() << "Invalid rotation value: " << static_cast(rotation_); + RTC_DCHECK_NOTREACHED() << "Invalid rotation value: " + << static_cast(rotation_); // Without this return, the Windows Visual Studio compiler complains // "not all control paths return a value". return GetFrame(); diff --git a/media/base/fake_media_engine.h b/media/base/fake_media_engine.h index e4f7b6659f..bf8a0592a5 100644 --- a/media/base/fake_media_engine.h +++ b/media/base/fake_media_engine.h @@ -275,7 +275,7 @@ class RtpHelper : public Base { } void OnPacketSent(const rtc::SentPacket& sent_packet) override {} void OnReadyToSend(bool ready) override { ready_to_send_ = ready; } - void OnNetworkRouteChanged(const std::string& transport_name, + void OnNetworkRouteChanged(absl::string_view transport_name, const rtc::NetworkRoute& network_route) override { last_network_route_ = network_route; ++num_network_route_changes_; diff --git a/media/base/fake_network_interface.h b/media/base/fake_network_interface.h index 45b7aa0fc0..043e559f28 100644 --- a/media/base/fake_network_interface.h +++ b/media/base/fake_network_interface.h @@ -17,6 +17,7 @@ #include "media/base/media_channel.h" #include "media/base/rtp_utils.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/byte_order.h" #include "rtc_base/checks.h" #include "rtc_base/copy_on_write_buffer.h" @@ -116,13 +117,12 @@ class FakeNetworkInterface : public MediaChannel::NetworkInterface, virtual bool SendPacket(rtc::CopyOnWriteBuffer* packet, const rtc::PacketOptions& options) RTC_LOCKS_EXCLUDED(mutex_) { - webrtc::MutexLock lock(&mutex_); - - uint32_t cur_ssrc = 0; - if (!GetRtpSsrc(packet->data(), packet->size(), &cur_ssrc)) { + if (!webrtc::IsRtpPacket(*packet)) { return false; } - sent_ssrcs_[cur_ssrc]++; + + webrtc::MutexLock lock(&mutex_); + sent_ssrcs_[webrtc::ParseRtpSsrc(*packet)]++; options_ = options; rtp_packets_.push_back(*packet); @@ -192,13 +192,8 @@ class FakeNetworkInterface : public MediaChannel::NetworkInterface, if (packets) { *packets = 0; } - uint32_t cur_ssrc = 0; for (size_t i = 0; i < rtp_packets_.size(); ++i) { - if (!GetRtpSsrc(rtp_packets_[i].data(), rtp_packets_[i].size(), - &cur_ssrc)) { - return; - } - if (ssrc == cur_ssrc) { + if (ssrc == webrtc::ParseRtpSsrc(rtp_packets_[i])) { if (bytes) { *bytes += static_cast(rtp_packets_[i].size()); } diff --git a/media/base/fake_rtp.cc b/media/base/fake_rtp.cc index 4f42821762..21322419e1 100644 --- a/media/base/fake_rtp.cc +++ b/media/base/fake_rtp.cc @@ -21,7 +21,7 @@ void CompareHeaderExtensions(const char* packet1, size_t packet1_size, const char* packet2, size_t packet2_size, - const std::vector encrypted_headers, + const std::vector& encrypted_headers, bool expect_equal) { // Sanity check: packets must be large enough to contain the RTP header and // extensions header. 
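The FakeNetworkInterface hunk above drops the local GetRtpSsrc()-style accessors in favor of the shared helpers declared in modules/rtp_rtcp/source/rtp_util.h. A minimal sketch of the resulting pattern, assuming only those two helpers; the function name and the sent_ssrcs map are illustrative, not part of the patch:

// Sketch: per-SSRC packet counting, as in the updated SendPacket().
#include <cstdint>
#include <map>

#include "api/array_view.h"
#include "modules/rtp_rtcp/source/rtp_util.h"

bool CountRtpPacket(rtc::ArrayView<const uint8_t> packet,
                    std::map<uint32_t, int>* sent_ssrcs) {
  // Validate the whole RTP header once up front; the removed helpers
  // bounds-checked each field accessor separately.
  if (!webrtc::IsRtpPacket(packet))
    return false;
  // ParseRtpSsrc() assumes the packet already passed IsRtpPacket().
  ++(*sent_ssrcs)[webrtc::ParseRtpSsrc(packet)];
  return true;
}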
diff --git a/media/base/fake_rtp.h b/media/base/fake_rtp.h index f2578151ed..8a176038cb 100644 --- a/media/base/fake_rtp.h +++ b/media/base/fake_rtp.h @@ -295,7 +295,7 @@ void CompareHeaderExtensions(const char* packet1, size_t packet1_size, const char* packet2, size_t packet2_size, - const std::vector encrypted_headers, + const std::vector& encrypted_headers, bool expect_equal); #endif // MEDIA_BASE_FAKE_RTP_H_ diff --git a/media/base/h264_profile_level_id.cc b/media/base/h264_profile_level_id.cc deleted file mode 100644 index 6f9fa46694..0000000000 --- a/media/base/h264_profile_level_id.cc +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "media/base/h264_profile_level_id.h" - -// TODO(crbug.com/1187565): Remove this file once downstream projects stop -// depend on it. - -namespace webrtc { -namespace H264 { - -absl::optional ParseProfileLevelId(const char* str) { - return webrtc::ParseH264ProfileLevelId(str); -} - -absl::optional ParseSdpProfileLevelId( - const SdpVideoFormat::Parameters& params) { - return webrtc::ParseSdpForH264ProfileLevelId(params); -} - -absl::optional SupportedLevel(int max_frame_pixel_count, float max_fps) { - return webrtc::H264SupportedLevel(max_frame_pixel_count, max_fps); -} - -absl::optional ProfileLevelIdToString( - const ProfileLevelId& profile_level_id) { - return webrtc::H264ProfileLevelIdToString(profile_level_id); -} - -bool IsSameH264Profile(const SdpVideoFormat::Parameters& params1, - const SdpVideoFormat::Parameters& params2) { - return webrtc::H264IsSameProfile(params1, params2); -} - -} // namespace H264 -} // namespace webrtc diff --git a/media/base/h264_profile_level_id.h b/media/base/h264_profile_level_id.h deleted file mode 100644 index c85709faa9..0000000000 --- a/media/base/h264_profile_level_id.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef MEDIA_BASE_H264_PROFILE_LEVEL_ID_H_ -#define MEDIA_BASE_H264_PROFILE_LEVEL_ID_H_ - -#include - -#include "api/video_codecs/h264_profile_level_id.h" - -// TODO(crbug.com/1187565): Remove this file once downstream projects stop -// depend on it. 
- -namespace webrtc { -namespace H264 { - -typedef H264Profile Profile; -typedef H264Level Level; -typedef H264ProfileLevelId ProfileLevelId; - -constexpr H264Profile kProfileConstrainedBaseline = - H264Profile::kProfileConstrainedBaseline; -constexpr H264Profile kProfileBaseline = H264Profile::kProfileBaseline; -constexpr H264Profile kProfileMain = H264Profile::kProfileMain; -constexpr H264Profile kProfileConstrainedHigh = - H264Profile::kProfileConstrainedHigh; -constexpr H264Profile kProfileHigh = H264Profile::kProfileHigh; - -constexpr H264Level kLevel1_b = H264Level::kLevel1_b; -constexpr H264Level kLevel1 = H264Level::kLevel1; -constexpr H264Level kLevel1_1 = H264Level::kLevel1_1; -constexpr H264Level kLevel1_2 = H264Level::kLevel1_2; -constexpr H264Level kLevel1_3 = H264Level::kLevel1_3; -constexpr H264Level kLevel2 = H264Level::kLevel2; -constexpr H264Level kLevel2_1 = H264Level::kLevel2_1; -constexpr H264Level kLevel2_2 = H264Level::kLevel2_2; -constexpr H264Level kLevel3 = H264Level::kLevel3; -constexpr H264Level kLevel3_1 = H264Level::kLevel3_1; -constexpr H264Level kLevel3_2 = H264Level::kLevel3_2; -constexpr H264Level kLevel4 = H264Level::kLevel4; -constexpr H264Level kLevel4_1 = H264Level::kLevel4_1; -constexpr H264Level kLevel4_2 = H264Level::kLevel4_2; -constexpr H264Level kLevel5 = H264Level::kLevel5; -constexpr H264Level kLevel5_1 = H264Level::kLevel5_1; -constexpr H264Level kLevel5_2 = H264Level::kLevel5_2; - -// Parse profile level id that is represented as a string of 3 hex bytes. -// Nothing will be returned if the string is not a recognized H264 -// profile level id. -absl::optional ParseProfileLevelId(const char* str); - -// Parse profile level id that is represented as a string of 3 hex bytes -// contained in an SDP key-value map. A default profile level id will be -// returned if the profile-level-id key is missing. Nothing will be returned if -// the key is present but the string is invalid. -RTC_EXPORT absl::optional ParseSdpProfileLevelId( - const SdpVideoFormat::Parameters& params); - -// Given that a decoder supports up to a given frame size (in pixels) at up to a -// given number of frames per second, return the highest H.264 level where it -// can guarantee that it will be able to support all valid encoded streams that -// are within that level. -RTC_EXPORT absl::optional SupportedLevel(int max_frame_pixel_count, - float max_fps); - -// Returns canonical string representation as three hex bytes of the profile -// level id, or returns nothing for invalid profile level ids. -RTC_EXPORT absl::optional ProfileLevelIdToString( - const ProfileLevelId& profile_level_id); - -// Returns true if the parameters have the same H264 profile, i.e. the same -// H264::Profile (Baseline, High, etc). 
-RTC_EXPORT bool IsSameH264Profile(const SdpVideoFormat::Parameters& params1, - const SdpVideoFormat::Parameters& params2); - -} // namespace H264 -} // namespace webrtc -#endif // MEDIA_BASE_H264_PROFILE_LEVEL_ID_H_ diff --git a/media/base/media_channel.cc b/media/base/media_channel.cc index 01b043b828..1b11fcc4e8 100644 --- a/media/base/media_channel.cc +++ b/media/base/media_channel.cc @@ -26,14 +26,8 @@ VideoOptions::VideoOptions() : content_hint(VideoTrackInterface::ContentHint::kNone) {} VideoOptions::~VideoOptions() = default; -MediaChannel::MediaChannel(const MediaConfig& config, - TaskQueueBase* network_thread) - : enable_dscp_(config.enable_dscp), - network_safety_(PendingTaskSafetyFlag::CreateDetachedInactive()), - network_thread_(network_thread) {} - -MediaChannel::MediaChannel(TaskQueueBase* network_thread) - : enable_dscp_(false), +MediaChannel::MediaChannel(TaskQueueBase* network_thread, bool enable_dscp) + : enable_dscp_(enable_dscp), network_safety_(PendingTaskSafetyFlag::CreateDetachedInactive()), network_thread_(network_thread) {} @@ -95,6 +89,11 @@ bool MediaChannel::ExtmapAllowMixed() const { return extmap_allow_mixed_; } +bool MediaChannel::HasNetworkInterface() const { + RTC_DCHECK_RUN_ON(network_thread_); + return network_interface_ != nullptr; +} + void MediaChannel::SetEncoderToPacketizerFrameTransformer( uint32_t ssrc, rtc::scoped_refptr frame_transformer) {} @@ -116,7 +115,7 @@ bool MediaChannel::DscpEnabled() const { } // This is the DSCP value used for both RTP and RTCP channels if DSCP is -// enabled. It can be changed at any time via |SetPreferredDscp|. +// enabled. It can be changed at any time via `SetPreferredDscp`. rtc::DiffServCodePoint MediaChannel::PreferredDscp() const { RTC_DCHECK_RUN_ON(network_thread_); return preferred_dscp_; diff --git a/media/base/media_channel.h b/media/base/media_channel.h index 7b9a6f138c..2b0ef81277 100644 --- a/media/base/media_channel.h +++ b/media/base/media_channel.h @@ -28,6 +28,7 @@ #include "api/rtp_parameters.h" #include "api/transport/data_channel_transport_interface.h" #include "api/transport/rtp/rtp_source.h" +#include "api/units/time_delta.h" #include "api/video/video_content_type.h" #include "api/video/video_sink_interface.h" #include "api/video/video_source_interface.h" @@ -37,7 +38,6 @@ #include "common_video/include/quality_limitation_reason.h" #include "media/base/codec.h" #include "media/base/delayable.h" -#include "media/base/media_config.h" #include "media/base/media_constants.h" #include "media/base/stream_params.h" #include "modules/audio_processing/include/audio_processing_statistics.h" @@ -169,9 +169,8 @@ class MediaChannel { virtual ~NetworkInterface() {} }; - MediaChannel(const MediaConfig& config, - webrtc::TaskQueueBase* network_thread); - explicit MediaChannel(webrtc::TaskQueueBase* network_thread); + explicit MediaChannel(webrtc::TaskQueueBase* network_thread, + bool enable_dscp = false); virtual ~MediaChannel(); virtual cricket::MediaType media_type() const = 0; @@ -188,7 +187,7 @@ class MediaChannel { virtual void OnReadyToSend(bool ready) = 0; // Called when the network route used for sending packets changed. virtual void OnNetworkRouteChanged( - const std::string& transport_name, + absl::string_view transport_name, const rtc::NetworkRoute& network_route) = 0; // Creates a new outgoing media stream with SSRCs and CNAME as described // by sp. 
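The media_channel.cc hunk above collapses the two MediaChannel constructors into a single one taking the DSCP flag directly, which is what lets the media_config.h include be dropped from media_channel.h. A self-contained model of that migration under illustrative names (not WebRTC code):

#include <cassert>

struct MediaConfig {
  bool enable_dscp = false;
};

class Channel {
 public:
  // One constructor with a defaulted flag replaces the old pair of
  // overloads taking (config, network_thread) and (network_thread).
  explicit Channel(bool enable_dscp = false) : enable_dscp_(enable_dscp) {}
  bool DscpEnabled() const { return enable_dscp_; }

 private:
  const bool enable_dscp_;
};

int main() {
  MediaConfig config;
  config.enable_dscp = true;
  Channel with_dscp(config.enable_dscp);  // was: Channel(config, thread)
  Channel defaulted;                      // was: Channel(thread)
  assert(with_dscp.DscpEnabled());
  assert(!defaulted.DscpEnabled());
  return 0;
}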
@@ -209,11 +208,14 @@ class MediaChannel { // Resets any cached StreamParams for an unsignaled RecvStream, and removes // any existing unsignaled streams. virtual void ResetUnsignaledRecvStream() = 0; - // Informs the media channel when the transport's demuxer criteria is updated. + // This is currently a workaround because of the demuxer state being managed + // across two separate threads. Once the state is consistently managed on + // the same thread (network), this workaround can be removed. + // These two notifications inform the media channel when the transport's + // demuxer criteria is being updated. // * OnDemuxerCriteriaUpdatePending() happens on the same thread that the // channel's streams are added and removed (worker thread). - // * OnDemuxerCriteriaUpdateComplete() happens on the thread where the demuxer - // lives (network thread). + // * OnDemuxerCriteriaUpdateComplete() happens on the same thread. // Because the demuxer is updated asynchronously, there is a window of time // where packets are arriving to the channel for streams that have already // been removed on the worker thread. It is important NOT to treat these as @@ -258,6 +260,10 @@ class MediaChannel { void SetExtmapAllowMixed(bool extmap_allow_mixed); bool ExtmapAllowMixed() const; + // Returns `true` if a non-null NetworkInterface pointer is held. + // Must be called on the network thread. + bool HasNetworkInterface() const; + virtual webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const = 0; virtual webrtc::RTCError SetRtpSendParameters( uint32_t ssrc, @@ -278,7 +284,7 @@ class MediaChannel { bool DscpEnabled() const; // This is the DSCP value used for both RTP and RTCP channels if DSCP is - // enabled. It can be changed at any time via |SetPreferredDscp|. + // enabled. It can be changed at any time via `SetPreferredDscp`. rtc::DiffServCodePoint PreferredDscp() const; void SetPreferredDscp(rtc::DiffServCodePoint new_dscp); @@ -374,6 +380,8 @@ struct MediaSenderInfo { uint64_t retransmitted_packets_sent = 0; // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-nackcount uint32_t nacks_rcvd = 0; + // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-targetbitrate + double target_bitrate = 0.0; int packets_lost = 0; float fraction_lost = 0.0f; int64_t rtt_ms = 0; @@ -471,7 +479,7 @@ struct VoiceReceiverInfo : public MediaReceiverInfo { int delay_estimate_ms = 0; int audio_level = 0; // Stats below correspond to similarly-named fields in the WebRTC stats spec. - // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats + // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats double total_output_energy = 0.0; uint64_t total_samples_received = 0; double total_output_duration = 0.0; @@ -483,6 +491,9 @@ struct VoiceReceiverInfo : public MediaReceiverInfo { uint64_t removed_samples_for_acceleration = 0; uint64_t fec_packets_received = 0; uint64_t fec_packets_discarded = 0; + // Stats below correspond to similarly-named fields in the WebRTC stats spec. + // https://w3c.github.io/webrtc-stats/#dom-rtcreceivedrtpstreamstats + uint64_t packets_discarded = 0; // Stats below DO NOT correspond directly to anything in the WebRTC stats // fraction of synthesized audio inserted through expansion. 
 float expand_rate = 0.0f;
@@ -528,6 +539,9 @@ struct VoiceReceiverInfo : public MediaReceiverInfo {
   uint32_t sender_reports_packets_sent = 0;
   uint64_t sender_reports_bytes_sent = 0;
   uint64_t sender_reports_reports_count = 0;
+  absl::optional<webrtc::TimeDelta> round_trip_time;
+  webrtc::TimeDelta total_round_trip_time = webrtc::TimeDelta::Zero();
+  int round_trip_time_measurements = 0;
 };

 struct VideoSenderInfo : public MediaSenderInfo {
@@ -540,7 +554,7 @@
   int send_frame_width = 0;
   int send_frame_height = 0;
   int frames = 0;
-  int framerate_input = 0;
+  double framerate_input = 0;
   int framerate_sent = 0;
   int aggregated_framerate_sent = 0;
   int nominal_bitrate = 0;
@@ -652,7 +666,7 @@ struct BandwidthEstimationInfo {
   int64_t bucket_delay = 0;
 };

-// Maps from payload type to |RtpCodecParameters|.
+// Maps from payload type to `RtpCodecParameters`.
 typedef std::map<int, webrtc::RtpCodecParameters> RtpCodecParametersMap;

 struct VoiceMediaInfo {
@@ -765,17 +779,15 @@ struct AudioRecvParameters : RtpParameters {};

 class VoiceMediaChannel : public MediaChannel, public Delayable {
  public:
-  explicit VoiceMediaChannel(webrtc::TaskQueueBase* network_thread)
-      : MediaChannel(network_thread) {}
-  VoiceMediaChannel(const MediaConfig& config,
-                    webrtc::TaskQueueBase* network_thread)
-      : MediaChannel(config, network_thread) {}
+  VoiceMediaChannel(webrtc::TaskQueueBase* network_thread,
+                    bool enable_dscp = false)
+      : MediaChannel(network_thread, enable_dscp) {}
   ~VoiceMediaChannel() override {}

   cricket::MediaType media_type() const override;
   virtual bool SetSendParameters(const AudioSendParameters& params) = 0;
   virtual bool SetRecvParameters(const AudioRecvParameters& params) = 0;
-  // Get the receive parameters for the incoming stream identified by |ssrc|.
+  // Get the receive parameters for the incoming stream identified by `ssrc`.
   virtual webrtc::RtpParameters GetRtpReceiveParameters(
       uint32_t ssrc) const = 0;
   // Retrieve the receive parameters for the default receive
@@ -796,9 +808,9 @@ class VoiceMediaChannel : public MediaChannel, public Delayable {
   virtual bool SetDefaultOutputVolume(double volume) = 0;
   // Returns if the telephone-event has been negotiated.
   virtual bool CanInsertDtmf() = 0;
-  // Send a DTMF |event|. The DTMF out-of-band signal will be used.
-  // The |ssrc| should be either 0 or a valid send stream ssrc.
-  // The valid value for the |event| are 0 to 15 which corresponding to
+  // Send a DTMF `event`. The DTMF out-of-band signal will be used.
+  // The `ssrc` should be either 0 or a valid send stream ssrc.
+  // The valid values for the `event` are 0 to 15, which correspond to
   // DTMF event 0-9, *, #, A-D.
   virtual bool InsertDtmf(uint32_t ssrc, int event, int duration) = 0;
   // Gets quality stats for the channel.
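The InsertDtmf() contract above limits `event` to 0-15, covering the digits, '*', '#', and A-D. A standalone sketch of that mapping as defined in RFC 4733; the helper name is illustrative:

#include <cassert>

// Maps a telephone-event code to its DTMF symbol: 0-9 are the digits,
// 10 is '*', 11 is '#', and 12-15 are 'A'-'D' (RFC 4733).
char DtmfEventToChar(int event) {
  if (event >= 0 && event <= 9)
    return static_cast<char>('0' + event);
  if (event == 10)
    return '*';
  if (event == 11)
    return '#';
  if (event >= 12 && event <= 15)
    return static_cast<char>('A' + (event - 12));
  return '\0';  // Outside the range InsertDtmf() accepts.
}

int main() {
  assert(DtmfEventToChar(7) == '7');
  assert(DtmfEventToChar(10) == '*');
  assert(DtmfEventToChar(11) == '#');
  assert(DtmfEventToChar(15) == 'D');
  return 0;
}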
@@ -837,17 +849,15 @@ struct VideoRecvParameters : RtpParameters {}; class VideoMediaChannel : public MediaChannel, public Delayable { public: - explicit VideoMediaChannel(webrtc::TaskQueueBase* network_thread) - : MediaChannel(network_thread) {} - VideoMediaChannel(const MediaConfig& config, - webrtc::TaskQueueBase* network_thread) - : MediaChannel(config, network_thread) {} + explicit VideoMediaChannel(webrtc::TaskQueueBase* network_thread, + bool enable_dscp = false) + : MediaChannel(network_thread, enable_dscp) {} ~VideoMediaChannel() override {} cricket::MediaType media_type() const override; virtual bool SetSendParameters(const VideoSendParameters& params) = 0; virtual bool SetRecvParameters(const VideoRecvParameters& params) = 0; - // Get the receive parameters for the incoming stream identified by |ssrc|. + // Get the receive parameters for the incoming stream identified by `ssrc`. virtual webrtc::RtpParameters GetRtpReceiveParameters( uint32_t ssrc) const = 0; // Retrieve the receive parameters for the default receive @@ -858,7 +868,7 @@ class VideoMediaChannel : public MediaChannel, public Delayable { // Starts or stops transmission (and potentially capture) of local video. virtual bool SetSend(bool send) = 0; // Configure stream for sending and register a source. - // The |ssrc| must correspond to a registered send stream. + // The `ssrc` must correspond to a registered send stream. virtual bool SetVideoSend( uint32_t ssrc, const VideoOptions* options, @@ -880,13 +890,13 @@ class VideoMediaChannel : public MediaChannel, public Delayable { virtual void FillBitrateInfo(BandwidthEstimationInfo* bwe_info) = 0; // Gets quality stats for the channel. virtual bool GetStats(VideoMediaInfo* info) = 0; - // Set recordable encoded frame callback for |ssrc| + // Set recordable encoded frame callback for `ssrc` virtual void SetRecordableEncodedFrameCallback( uint32_t ssrc, std::function callback) = 0; - // Clear recordable encoded frame callback for |ssrc| + // Clear recordable encoded frame callback for `ssrc` virtual void ClearRecordableEncodedFrameCallback(uint32_t ssrc) = 0; - // Cause generation of a keyframe for |ssrc| + // Cause generation of a keyframe for `ssrc` virtual void GenerateKeyFrame(uint32_t ssrc) = 0; virtual std::vector GetSources(uint32_t ssrc) const = 0; diff --git a/media/base/media_constants.cc b/media/base/media_constants.cc index 17a8a83bd0..da5e7a8adf 100644 --- a/media/base/media_constants.cc +++ b/media/base/media_constants.cc @@ -39,6 +39,9 @@ const char kCodecParamRtxTime[] = "rtx-time"; const char kCodecParamAssociatedPayloadType[] = "apt"; const char kCodecParamAssociatedCodecName[] = "acn"; +// Parameters that do not follow the key-value convention +// are treated as having the empty string as key. 
+const char kCodecParamNotInNameValueFormat[] = ""; const char kOpusCodecName[] = "opus"; const char kIsacCodecName[] = "ISAC"; @@ -99,7 +102,7 @@ const char kComfortNoiseCodecName[] = "CN"; const char kVp8CodecName[] = "VP8"; const char kVp9CodecName[] = "VP9"; -const char kAv1CodecName[] = "AV1X"; +const char kAv1CodecName[] = "AV1"; const char kH264CodecName[] = "H264"; // RFC 6184 RTP Payload Format for H.264 video diff --git a/media/base/media_constants.h b/media/base/media_constants.h index bf7f0c3047..16c5db92b9 100644 --- a/media/base/media_constants.h +++ b/media/base/media_constants.h @@ -42,6 +42,7 @@ extern const char kCodecParamRtxTime[]; extern const char kCodecParamAssociatedPayloadType[]; extern const char kCodecParamAssociatedCodecName[]; +extern const char kCodecParamNotInNameValueFormat[]; extern const char kOpusCodecName[]; extern const char kIsacCodecName[]; @@ -67,7 +68,7 @@ extern const char kCodecParamMaxPlaybackRate[]; extern const char kParamValueTrue[]; // Parameters are stored as parameter/value pairs. For parameters who do not -// have a value, |kParamValueEmpty| should be used as value. +// have a value, `kParamValueEmpty` should be used as value. extern const char kParamValueEmpty[]; // opus parameters. diff --git a/media/base/media_engine.cc b/media/base/media_engine.cc index 36a9694cfb..21c3787382 100644 --- a/media/base/media_engine.cc +++ b/media/base/media_engine.cc @@ -106,15 +106,6 @@ webrtc::RTCError CheckRtpParametersValues( "num_temporal_layers to an invalid number."); } } - if (i > 0 && (rtp_parameters.encodings[i].num_temporal_layers != - rtp_parameters.encodings[i - 1].num_temporal_layers)) { - LOG_AND_RETURN_ERROR( - RTCErrorType::INVALID_MODIFICATION, - "Attempted to set RtpParameters num_temporal_layers " - "at encoding layer i: " + - rtc::ToString(i) + - " to a different value than other encoding layers."); - } } return webrtc::RTCError::OK(); diff --git a/media/base/media_engine.h b/media/base/media_engine.h index 6f47127f30..1778104a30 100644 --- a/media/base/media_engine.h +++ b/media/base/media_engine.h @@ -24,6 +24,7 @@ #include "call/audio_state.h" #include "media/base/codec.h" #include "media/base/media_channel.h" +#include "media/base/media_config.h" #include "media/base/video_common.h" #include "rtc_base/system/file_wrapper.h" @@ -63,7 +64,9 @@ class VoiceEngineInterface : public RtpHeaderExtensionQueryInterface { public: VoiceEngineInterface() = default; virtual ~VoiceEngineInterface() = default; - RTC_DISALLOW_COPY_AND_ASSIGN(VoiceEngineInterface); + + VoiceEngineInterface(const VoiceEngineInterface&) = delete; + VoiceEngineInterface& operator=(const VoiceEngineInterface&) = delete; // Initialization // Starts the engine. @@ -97,7 +100,9 @@ class VideoEngineInterface : public RtpHeaderExtensionQueryInterface { public: VideoEngineInterface() = default; virtual ~VideoEngineInterface() = default; - RTC_DISALLOW_COPY_AND_ASSIGN(VideoEngineInterface); + + VideoEngineInterface(const VideoEngineInterface&) = delete; + VideoEngineInterface& operator=(const VideoEngineInterface&) = delete; // Creates a video media channel, paired with the specified voice channel. // Returns NULL on failure. 
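The media_engine.h hunks above replace the RTC_DISALLOW_COPY_AND_ASSIGN macro with explicitly deleted copy operations, the same cleanup applied to VideoAdapter further down. A minimal standalone model of the idiom; the class name is illustrative:

class Engine {
 public:
  Engine() = default;
  virtual ~Engine() = default;

  // Deleted copy constructor and copy assignment, declared after the
  // destructor, replace RTC_DISALLOW_COPY_AND_ASSIGN(Engine) from
  // rtc_base/constructor_magic.h.
  Engine(const Engine&) = delete;
  Engine& operator=(const Engine&) = delete;
};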
diff --git a/media/base/rtp_utils.cc b/media/base/rtp_utils.cc index 9f90c468f7..c630cbc7e4 100644 --- a/media/base/rtp_utils.cc +++ b/media/base/rtp_utils.cc @@ -25,10 +25,6 @@ namespace cricket { -static const size_t kRtpPayloadTypeOffset = 1; -static const size_t kRtpSeqNumOffset = 2; -static const size_t kRtpTimestampOffset = 4; -static const size_t kRtpSsrcOffset = 8; static const size_t kRtcpPayloadTypeOffset = 1; static const size_t kRtpExtensionHeaderLen = 4; static const size_t kAbsSendTimeExtensionLen = 3; @@ -62,7 +58,7 @@ void UpdateAbsSendTimeExtensionValue(uint8_t* extension_data, // | ID | len=2 | absolute send time | // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ if (length != kAbsSendTimeExtensionLen) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return; } @@ -73,7 +69,7 @@ void UpdateAbsSendTimeExtensionValue(uint8_t* extension_data, extension_data[2] = static_cast(send_time); } -// Assumes |length| is actual packet length + tag length. Updates HMAC at end of +// Assumes `length` is actual packet length + tag length. Updates HMAC at end of // the RTP packet. void UpdateRtpAuthTag(uint8_t* rtp, size_t length, @@ -88,7 +84,7 @@ void UpdateRtpAuthTag(uint8_t* rtp, // ROC (rollover counter) is at the beginning of the auth tag. const size_t kRocLength = 4; if (tag_length < kRocLength || tag_length > length) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return; } @@ -109,7 +105,7 @@ void UpdateRtpAuthTag(uint8_t* rtp, auth_required_length, output, sizeof(output)); if (result < tag_length) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return; } @@ -126,57 +122,8 @@ bool GetUint8(const void* data, size_t offset, int* value) { return true; } -bool GetUint16(const void* data, size_t offset, int* value) { - if (!data || !value) { - return false; - } - *value = static_cast( - rtc::GetBE16(static_cast(data) + offset)); - return true; -} - -bool GetUint32(const void* data, size_t offset, uint32_t* value) { - if (!data || !value) { - return false; - } - *value = rtc::GetBE32(static_cast(data) + offset); - return true; -} - } // namespace -bool GetRtpPayloadType(const void* data, size_t len, int* value) { - if (len < kMinRtpPacketLen) { - return false; - } - if (!GetUint8(data, kRtpPayloadTypeOffset, value)) { - return false; - } - *value &= 0x7F; - return true; -} - -bool GetRtpSeqNum(const void* data, size_t len, int* value) { - if (len < kMinRtpPacketLen) { - return false; - } - return GetUint16(data, kRtpSeqNumOffset, value); -} - -bool GetRtpTimestamp(const void* data, size_t len, uint32_t* value) { - if (len < kMinRtpPacketLen) { - return false; - } - return GetUint32(data, kRtpTimestampOffset, value); -} - -bool GetRtpSsrc(const void* data, size_t len, uint32_t* value) { - if (len < kMinRtpPacketLen) { - return false; - } - return GetUint32(data, kRtpSsrcOffset, value); -} - bool GetRtcpType(const void* data, size_t len, int* value) { if (len < kMinRtcpPacketLen) { return false; @@ -412,7 +359,7 @@ bool ApplyPacketOptions(uint8_t* data, RTC_DCHECK(data); RTC_DCHECK(length); - // if there is no valid |rtp_sendtime_extension_id| and |srtp_auth_key| in + // if there is no valid `rtp_sendtime_extension_id` and `srtp_auth_key` in // PacketOptions, nothing to be updated in this packet. 
if (packet_time_params.rtp_sendtime_extension_id == -1 && packet_time_params.srtp_auth_key.empty()) { @@ -425,7 +372,7 @@ bool ApplyPacketOptions(uint8_t* data, size_t rtp_start_pos; size_t rtp_length; if (!UnwrapTurnPacket(data, length, &rtp_start_pos, &rtp_length)) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return false; } @@ -433,7 +380,7 @@ bool ApplyPacketOptions(uint8_t* data, auto packet = rtc::MakeArrayView(data + rtp_start_pos, rtp_length); if (!webrtc::IsRtpPacket(packet) || !ValidateRtpHeader(data + rtp_start_pos, rtp_length, nullptr)) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return false; } diff --git a/media/base/rtp_utils.h b/media/base/rtp_utils.h index f6b5dbc9f0..a501fd7af3 100644 --- a/media/base/rtp_utils.h +++ b/media/base/rtp_utils.h @@ -42,11 +42,6 @@ enum class RtpPacketType { kUnknown, }; -bool GetRtpPayloadType(const void* data, size_t len, int* value); -bool GetRtpSeqNum(const void* data, size_t len, int* value); -bool GetRtpTimestamp(const void* data, size_t len, uint32_t* value); -bool GetRtpSsrc(const void* data, size_t len, uint32_t* value); - bool GetRtcpType(const void* data, size_t len, int* value); bool GetRtcpSsrc(const void* data, size_t len, uint32_t* value); @@ -55,10 +50,10 @@ RtpPacketType InferRtpPacketType(rtc::ArrayView packet); // True if |payload type| is 0-127. bool IsValidRtpPayloadType(int payload_type); -// True if |size| is appropriate for the indicated packet type. +// True if `size` is appropriate for the indicated packet type. bool IsValidRtpPacketSize(RtpPacketType packet_type, size_t size); -// Returns "RTCP", "RTP" or "Unknown" according to |packet_type|. +// Returns "RTCP", "RTP" or "Unknown" according to `packet_type`. absl::string_view RtpPacketTypeToString(RtpPacketType packet_type); // Verifies that a packet has a valid RTP header. @@ -72,7 +67,7 @@ bool UpdateRtpAbsSendTimeExtension(uint8_t* rtp, int extension_id, uint64_t time_us); -// Applies specified |options| to the packet. It updates the absolute send time +// Applies specified `options` to the packet. It updates the absolute send time // extension header if it is present present then updates HMAC. bool RTC_EXPORT ApplyPacketOptions(uint8_t* data, diff --git a/media/base/rtp_utils_unittest.cc b/media/base/rtp_utils_unittest.cc index 14599abca2..a594f944c0 100644 --- a/media/base/rtp_utils_unittest.cc +++ b/media/base/rtp_utils_unittest.cc @@ -21,8 +21,6 @@ namespace cricket { -static const uint8_t kRtpPacketWithMarker[] = { - 0x80, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; static const uint8_t kInvalidPacket[] = {0x80, 0x00}; // PT = 206, FMT = 1, Sender SSRC = 0x1111, Media SSRC = 0x1111 @@ -69,9 +67,9 @@ static uint8_t kRtpMsgWithTwoByteAbsSendTimeExtension[] = { }; // Index of AbsSendTimeExtn data in message -// |kRtpMsgWithOneByteAbsSendTimeExtension|. +// `kRtpMsgWithOneByteAbsSendTimeExtension`. static const int kAstIndexInOneByteRtpMsg = 21; -// and in message |kRtpMsgWithTwoByteAbsSendTimeExtension|. +// and in message `kRtpMsgWithTwoByteAbsSendTimeExtension`. 
static const int kAstIndexInTwoByteRtpMsg = 21; static const rtc::ArrayView kPcmuFrameArrayView = @@ -84,32 +82,6 @@ static const rtc::ArrayView kInvalidPacketArrayView = rtc::MakeArrayView(reinterpret_cast(kInvalidPacket), sizeof(kInvalidPacket)); -TEST(RtpUtilsTest, GetRtp) { - int pt; - EXPECT_TRUE(GetRtpPayloadType(kPcmuFrame, sizeof(kPcmuFrame), &pt)); - EXPECT_EQ(0, pt); - EXPECT_TRUE(GetRtpPayloadType(kRtpPacketWithMarker, - sizeof(kRtpPacketWithMarker), &pt)); - EXPECT_EQ(0, pt); - - int seq_num; - EXPECT_TRUE(GetRtpSeqNum(kPcmuFrame, sizeof(kPcmuFrame), &seq_num)); - EXPECT_EQ(1, seq_num); - - uint32_t ts; - EXPECT_TRUE(GetRtpTimestamp(kPcmuFrame, sizeof(kPcmuFrame), &ts)); - EXPECT_EQ(0u, ts); - - uint32_t ssrc; - EXPECT_TRUE(GetRtpSsrc(kPcmuFrame, sizeof(kPcmuFrame), &ssrc)); - EXPECT_EQ(1u, ssrc); - - EXPECT_FALSE(GetRtpPayloadType(kInvalidPacket, sizeof(kInvalidPacket), &pt)); - EXPECT_FALSE(GetRtpSeqNum(kInvalidPacket, sizeof(kInvalidPacket), &seq_num)); - EXPECT_FALSE(GetRtpTimestamp(kInvalidPacket, sizeof(kInvalidPacket), &ts)); - EXPECT_FALSE(GetRtpSsrc(kInvalidPacket, sizeof(kInvalidPacket), &ssrc)); -} - TEST(RtpUtilsTest, GetRtcp) { int pt; EXPECT_TRUE(GetRtcpType(kRtcpReport, sizeof(kRtcpReport), &pt)); diff --git a/media/base/sdp_video_format_utils.h b/media/base/sdp_video_format_utils.h index 6671c182ac..80c1e4d501 100644 --- a/media/base/sdp_video_format_utils.h +++ b/media/base/sdp_video_format_utils.h @@ -17,18 +17,18 @@ namespace webrtc { // Generate codec parameters that will be used as answer in an SDP negotiation // based on local supported parameters and remote offered parameters. Both -// |local_supported_params|, |remote_offered_params|, and |answer_params| +// `local_supported_params`, `remote_offered_params`, and `answer_params` // represent sendrecv media descriptions, i.e they are a mix of both encode and -// decode capabilities. In theory, when the profile in |local_supported_params| -// represent a strict superset of the profile in |remote_offered_params|, we -// could limit the profile in |answer_params| to the profile in -// |remote_offered_params|. However, to simplify the code, each supported H264 +// decode capabilities. In theory, when the profile in `local_supported_params` +// represent a strict superset of the profile in `remote_offered_params`, we +// could limit the profile in `answer_params` to the profile in +// `remote_offered_params`. However, to simplify the code, each supported H264 // profile should be listed explicitly in the list of local supported codecs, // even if they are redundant. Then each local codec in the list should be // tested one at a time against the remote codec, and only when the profiles are // equal should this function be called. Therefore, this function does not need -// to handle profile intersection, and the profile of |local_supported_params| -// and |remote_offered_params| must be equal before calling this function. The +// to handle profile intersection, and the profile of `local_supported_params` +// and `remote_offered_params` must be equal before calling this function. The // parameters that are used when negotiating are the level part of // profile-level-id and level-asymmetry-allowed. 
void H264GenerateProfileLevelIdForAnswer( diff --git a/media/base/stream_params.cc b/media/base/stream_params.cc index db781acfc7..0fe1be6ac7 100644 --- a/media/base/stream_params.cc +++ b/media/base/stream_params.cc @@ -111,7 +111,7 @@ StreamParams& StreamParams::operator=(const StreamParams&) = default; StreamParams& StreamParams::operator=(StreamParams&&) = default; bool StreamParams::operator==(const StreamParams& other) const { - return (groupid == other.groupid && id == other.id && ssrcs == other.ssrcs && + return (id == other.id && ssrcs == other.ssrcs && ssrc_groups == other.ssrc_groups && cname == other.cname && stream_ids_ == other.stream_ids_ && // RIDs are not required to be in the same order for equality. @@ -122,9 +122,6 @@ std::string StreamParams::ToString() const { char buf[2 * 1024]; rtc::SimpleStringBuilder sb(buf); sb << "{"; - if (!groupid.empty()) { - sb << "groupid:" << groupid << ";"; - } if (!id.empty()) { sb << "id:" << id << ";"; } diff --git a/media/base/stream_params.h b/media/base/stream_params.h index b8c37706df..c9c8a09592 100644 --- a/media/base/stream_params.h +++ b/media/base/stream_params.h @@ -54,7 +54,6 @@ #include "absl/algorithm/container.h" #include "media/base/rid_description.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/unique_id_generator.h" namespace cricket { @@ -183,11 +182,6 @@ struct StreamParams { std::string ToString() const; - // Resource of the MUC jid of the participant of with this stream. - // For 1:1 calls, should be left empty (which means remote streams - // and local streams should not be mixed together). This is not used - // internally and should be deprecated. - std::string groupid; // A unique identifier of the StreamParams object. When the SDP is created, // this comes from the track ID of the sender that the StreamParams object // is associated with. @@ -224,26 +218,22 @@ struct StreamParams { std::vector rids_; }; -// A Stream can be selected by either groupid+id or ssrc. +// A Stream can be selected by either id or ssrc. 
struct StreamSelector { explicit StreamSelector(uint32_t ssrc) : ssrc(ssrc) {} - StreamSelector(const std::string& groupid, const std::string& streamid) - : ssrc(0), groupid(groupid), streamid(streamid) {} - explicit StreamSelector(const std::string& streamid) : ssrc(0), streamid(streamid) {} bool Matches(const StreamParams& stream) const { if (ssrc == 0) { - return stream.groupid == groupid && stream.id == streamid; + return stream.id == streamid; } else { return stream.has_ssrc(ssrc); } } uint32_t ssrc; - std::string groupid; std::string streamid; }; @@ -274,19 +264,15 @@ inline const StreamParams* GetStreamBySsrc(const StreamParamsVec& streams, } inline const StreamParams* GetStreamByIds(const StreamParamsVec& streams, - const std::string& groupid, const std::string& id) { - return GetStream(streams, [&groupid, &id](const StreamParams& sp) { - return sp.groupid == groupid && sp.id == id; - }); + return GetStream(streams, + [&id](const StreamParams& sp) { return sp.id == id; }); } inline StreamParams* GetStreamByIds(StreamParamsVec& streams, - const std::string& groupid, const std::string& id) { - return GetStream(streams, [&groupid, &id](const StreamParams& sp) { - return sp.groupid == groupid && sp.id == id; - }); + return GetStream(streams, + [&id](const StreamParams& sp) { return sp.id == id; }); } inline const StreamParams* GetStream(const StreamParamsVec& streams, @@ -318,11 +304,9 @@ inline bool RemoveStreamBySsrc(StreamParamsVec* streams, uint32_t ssrc) { streams, [&ssrc](const StreamParams& sp) { return sp.has_ssrc(ssrc); }); } inline bool RemoveStreamByIds(StreamParamsVec* streams, - const std::string& groupid, const std::string& id) { - return RemoveStream(streams, [&groupid, &id](const StreamParams& sp) { - return sp.groupid == groupid && sp.id == id; - }); + return RemoveStream(streams, + [&id](const StreamParams& sp) { return sp.id == id; }); } } // namespace cricket diff --git a/media/base/test_utils.h b/media/base/test_utils.h index 46783a17f5..22bda4f12a 100644 --- a/media/base/test_utils.h +++ b/media/base/test_utils.h @@ -35,7 +35,7 @@ inline std::vector MakeVector(const T a[], size_t s) { } #define MAKE_VECTOR(a) cricket::MakeVector(a, arraysize(a)) -// Checks whether |codecs| contains |codec|; checks using Codec::Matches(). +// Checks whether `codecs` contains `codec`; checks using Codec::Matches(). template bool ContainsMatchingCodec(const std::vector& codecs, const C& codec) { typename std::vector::const_iterator it; @@ -47,11 +47,11 @@ bool ContainsMatchingCodec(const std::vector& codecs, const C& codec) { return false; } -// Create Simulcast StreamParams with given |ssrcs| and |cname|. +// Create Simulcast StreamParams with given `ssrcs` and `cname`. cricket::StreamParams CreateSimStreamParams(const std::string& cname, const std::vector& ssrcs); -// Create Simulcast stream with given |ssrcs| and |rtx_ssrcs|. -// The number of |rtx_ssrcs| must match number of |ssrcs|. +// Create Simulcast stream with given `ssrcs` and `rtx_ssrcs`. +// The number of `rtx_ssrcs` must match number of `ssrcs`. cricket::StreamParams CreateSimWithRtxStreamParams( const std::string& cname, const std::vector& ssrcs, diff --git a/media/base/video_adapter.cc b/media/base/video_adapter.cc index ddcf4cac89..4785dfcfe2 100644 --- a/media/base/video_adapter.cc +++ b/media/base/video_adapter.cc @@ -36,14 +36,14 @@ struct Fraction { } // Determines number of output pixels if both width and height of an input of - // |input_pixels| pixels is scaled with the fraction numerator / denominator. 
+ // `input_pixels` pixels is scaled with the fraction numerator / denominator. int scale_pixel_count(int input_pixels) { return (numerator * numerator * input_pixels) / (denominator * denominator); } }; -// Round |value_to_round| to a multiple of |multiple|. Prefer rounding upwards, -// but never more than |max_value|. +// Round `value_to_round` to a multiple of `multiple`. Prefer rounding upwards, +// but never more than `max_value`. int roundUp(int value_to_round, int multiple, int max_value) { const int rounded_value = (value_to_round + multiple - 1) / multiple * multiple; @@ -51,8 +51,8 @@ int roundUp(int value_to_round, int multiple, int max_value) { : (max_value / multiple * multiple); } -// Generates a scale factor that makes |input_pixels| close to |target_pixels|, -// but no higher than |max_pixels|. +// Generates a scale factor that makes `input_pixels` close to `target_pixels`, +// but no higher than `max_pixels`. Fraction FindScale(int input_width, int input_height, int target_pixels, @@ -73,7 +73,7 @@ Fraction FindScale(int input_width, Fraction best_scale = Fraction{1, 1}; if (variable_start_scale_factor) { - // Start scaling down by 2/3 depending on |input_width| and |input_height|. + // Start scaling down by 2/3 depending on `input_width` and `input_height`. if (input_width % 3 == 0 && input_height % 3 == 0) { // 2/3 (then alternates 3/4, 2/3, 3/4,...). current_scale = Fraction{6, 6}; @@ -144,43 +144,13 @@ VideoAdapter::VideoAdapter() : VideoAdapter(1) {} VideoAdapter::~VideoAdapter() {} -bool VideoAdapter::KeepFrame(int64_t in_timestamp_ns) { +bool VideoAdapter::DropFrame(int64_t in_timestamp_ns) { int max_fps = max_framerate_request_; if (max_fps_) max_fps = std::min(max_fps, *max_fps_); - if (max_fps <= 0) - return false; - - // If |max_framerate_request_| is not set, it will default to maxint, which - // will lead to a frame_interval_ns rounded to 0. - int64_t frame_interval_ns = rtc::kNumNanosecsPerSec / max_fps; - if (frame_interval_ns <= 0) { - // Frame rate throttling not enabled. - return true; - } - - if (next_frame_timestamp_ns_) { - // Time until next frame should be outputted. - const int64_t time_until_next_frame_ns = - (*next_frame_timestamp_ns_ - in_timestamp_ns); - - // Continue if timestamp is within expected range. - if (std::abs(time_until_next_frame_ns) < 2 * frame_interval_ns) { - // Drop if a frame shouldn't be outputted yet. - if (time_until_next_frame_ns > 0) - return false; - // Time to output new frame. - *next_frame_timestamp_ns_ += frame_interval_ns; - return true; - } - } - - // First timestamp received or timestamp is way outside expected range, so - // reset. Set first timestamp target to just half the interval to prefer - // keeping frames in case of jitter. - next_frame_timestamp_ns_ = in_timestamp_ns + frame_interval_ns / 2; - return true; + framerate_controller_.SetMaxFramerate(max_fps); + return framerate_controller_.ShouldDropFrame(in_timestamp_ns); } bool VideoAdapter::AdaptFrameResolution(int in_width, @@ -214,7 +184,7 @@ bool VideoAdapter::AdaptFrameResolution(int in_width, std::min(resolution_request_target_pixel_count_, max_pixel_count); // Drop the input frame if necessary. - if (max_pixel_count <= 0 || !KeepFrame(in_timestamp_ns)) { + if (max_pixel_count <= 0 || DropFrame(in_timestamp_ns)) { // Show VAdapt log every 90 frames dropped. 
(3 seconds) if ((frames_in_ - frames_out_) % 90 == 0) { // TODO(fbarchard): Reduce to LS_VERBOSE when adapter info is not needed @@ -335,7 +305,7 @@ void VideoAdapter::OnOutputFormatRequest( target_portrait_aspect_ratio_ = target_portrait_aspect_ratio; max_portrait_pixel_count_ = max_portrait_pixel_count; max_fps_ = max_fps; - next_frame_timestamp_ns_ = absl::nullopt; + framerate_controller_.Reset(); } void VideoAdapter::OnSinkWants(const rtc::VideoSinkWants& sink_wants) { @@ -356,7 +326,7 @@ int VideoAdapter::GetTargetPixels() const { float VideoAdapter::GetMaxFramerate() const { webrtc::MutexLock lock(&mutex_); - // Minimum of |max_fps_| and |max_framerate_request_| is used to throttle + // Minimum of `max_fps_` and `max_framerate_request_` is used to throttle // frame-rate. int framerate = std::min(max_framerate_request_, max_fps_.value_or(max_framerate_request_)); diff --git a/media/base/video_adapter.h b/media/base/video_adapter.h index 3ed58954e9..071330f330 100644 --- a/media/base/video_adapter.h +++ b/media/base/video_adapter.h @@ -17,8 +17,8 @@ #include "absl/types/optional.h" #include "api/video/video_source_interface.h" +#include "common_video/framerate_controller.h" #include "media/base/video_common.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/synchronization/mutex.h" #include "rtc_base/system/rtc_export.h" #include "rtc_base/thread_annotations.h" @@ -33,10 +33,13 @@ class RTC_EXPORT VideoAdapter { public: VideoAdapter(); // The source requests output frames whose width and height are divisible - // by |source_resolution_alignment|. + // by `source_resolution_alignment`. explicit VideoAdapter(int source_resolution_alignment); virtual ~VideoAdapter(); + VideoAdapter(const VideoAdapter&) = delete; + VideoAdapter& operator=(const VideoAdapter&) = delete; + // Return the adapted resolution and cropping parameters given the // input resolution. The input frame should first be cropped, then // scaled to the final output resolution. Returns true if the frame @@ -52,7 +55,7 @@ class RTC_EXPORT VideoAdapter { // DEPRECATED. Please use OnOutputFormatRequest below. // TODO(asapersson): Remove this once it is no longer used. // Requests the output frame size and frame interval from - // |AdaptFrameResolution| to not be larger than |format|. Also, the input + // `AdaptFrameResolution` to not be larger than `format`. Also, the input // frame size will be cropped to match the requested aspect ratio. The // requested aspect ratio is orientation agnostic and will be adjusted to // maintain the input orientation, so it doesn't matter if e.g. 1280x720 or @@ -61,13 +64,13 @@ class RTC_EXPORT VideoAdapter { void OnOutputFormatRequest(const absl::optional& format) RTC_LOCKS_EXCLUDED(mutex_); - // Requests output frame size and frame interval from |AdaptFrameResolution|. - // |target_aspect_ratio|: The input frame size will be cropped to match the + // Requests output frame size and frame interval from `AdaptFrameResolution`. + // `target_aspect_ratio`: The input frame size will be cropped to match the // requested aspect ratio. The aspect ratio is orientation agnostic and will // be adjusted to maintain the input orientation (i.e. it doesn't matter if // e.g. <1280,720> or <720,1280> is requested). - // |max_pixel_count|: The maximum output frame size. - // |max_fps|: The maximum output framerate. + // `max_pixel_count`: The maximum output frame size. + // `max_fps`: The maximum output framerate. // Note: Should be called from the source only. 
void OnOutputFormatRequest( const absl::optional>& target_aspect_ratio, @@ -85,30 +88,30 @@ class RTC_EXPORT VideoAdapter { const absl::optional& max_portrait_pixel_count, const absl::optional& max_fps) RTC_LOCKS_EXCLUDED(mutex_); - // Requests the output frame size from |AdaptFrameResolution| to have as close - // as possible to |sink_wants.target_pixel_count| pixels (if set) - // but no more than |sink_wants.max_pixel_count|. - // |sink_wants.max_framerate_fps| is essentially analogous to - // |sink_wants.max_pixel_count|, but for framerate rather than resolution. - // Set |sink_wants.max_pixel_count| and/or |sink_wants.max_framerate_fps| to + // Requests the output frame size from `AdaptFrameResolution` to have as close + // as possible to `sink_wants.target_pixel_count` pixels (if set) + // but no more than `sink_wants.max_pixel_count`. + // `sink_wants.max_framerate_fps` is essentially analogous to + // `sink_wants.max_pixel_count`, but for framerate rather than resolution. + // Set `sink_wants.max_pixel_count` and/or `sink_wants.max_framerate_fps` to // std::numeric_limit::max() if no upper limit is desired. // The sink resolution alignment requirement is given by - // |sink_wants.resolution_alignment|. + // `sink_wants.resolution_alignment`. // Note: Should be called from the sink only. void OnSinkWants(const rtc::VideoSinkWants& sink_wants) RTC_LOCKS_EXCLUDED(mutex_); // Returns maximum image area, which shouldn't impose any adaptations. - // Can return |numeric_limits::max()| if no limit is set. + // Can return `numeric_limits::max()` if no limit is set. int GetTargetPixels() const; // Returns current frame-rate limit. - // Can return |numeric_limits::infinity()| if no limit is set. + // Can return `numeric_limits::infinity()` if no limit is set. float GetMaxFramerate() const; private: // Determine if frame should be dropped based on input fps and requested fps. - bool KeepFrame(int64_t in_timestamp_ns) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + bool DropFrame(int64_t in_timestamp_ns) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); int frames_in_ RTC_GUARDED_BY(mutex_); // Number of input frames. int frames_out_ RTC_GUARDED_BY(mutex_); // Number of output frames. @@ -123,13 +126,10 @@ class RTC_EXPORT VideoAdapter { // The fixed source resolution alignment requirement. const int source_resolution_alignment_; // The currently applied resolution alignment, as given by the requirements: - // - the fixed |source_resolution_alignment_|; and - // - the latest |sink_wants.resolution_alignment|. + // - the fixed `source_resolution_alignment_`; and + // - the latest `sink_wants.resolution_alignment`. int resolution_alignment_ RTC_GUARDED_BY(mutex_); - // The target timestamp for the next frame based on requested format. - absl::optional next_frame_timestamp_ns_ RTC_GUARDED_BY(mutex_); - // Max number of pixels/fps requested via calls to OnOutputFormatRequest, // OnResolutionFramerateRequest respectively. // The adapted output format is the minimum of these. @@ -144,10 +144,10 @@ class RTC_EXPORT VideoAdapter { int resolution_request_max_pixel_count_ RTC_GUARDED_BY(mutex_); int max_framerate_request_ RTC_GUARDED_BY(mutex_); + webrtc::FramerateController framerate_controller_ RTC_GUARDED_BY(mutex_); + // The critical section to protect the above variables. 
mutable webrtc::Mutex mutex_; - - RTC_DISALLOW_COPY_AND_ASSIGN(VideoAdapter); }; } // namespace cricket diff --git a/media/base/video_adapter_unittest.cc b/media/base/video_adapter_unittest.cc index cbea8178ba..347e24d905 100644 --- a/media/base/video_adapter_unittest.cc +++ b/media/base/video_adapter_unittest.cc @@ -195,43 +195,8 @@ TEST_P(VideoAdapterTest, AdaptFramerateToHalf) { // Capture 10 frames and verify that every other frame is dropped. The first // frame should not be dropped. - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 1); - EXPECT_EQ(0, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 2); - EXPECT_EQ(1, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 3); - EXPECT_EQ(1, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 4); - EXPECT_EQ(2, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 5); - EXPECT_EQ(2, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 6); - EXPECT_EQ(3, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 7); - EXPECT_EQ(3, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 8); - EXPECT_EQ(4, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 9); - EXPECT_EQ(4, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); + for (int i = 0; i < 10; ++i) + adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 10); EXPECT_EQ(5, adapter_wrapper_->GetStats().dropped_frames); } @@ -244,43 +209,8 @@ TEST_P(VideoAdapterTest, AdaptFramerateToTwoThirds) { // Capture 10 frames and verify that every third frame is dropped. The first // frame should not be dropped. 
- adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 1); - EXPECT_EQ(0, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 2); - EXPECT_EQ(0, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 3); - EXPECT_EQ(1, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 4); - EXPECT_EQ(1, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 5); - EXPECT_EQ(1, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 6); - EXPECT_EQ(2, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 7); - EXPECT_EQ(2, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 8); - EXPECT_EQ(2, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); - EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 9); - EXPECT_EQ(3, adapter_wrapper_->GetStats().dropped_frames); - - adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); + for (int i = 0; i < 10; ++i) + adapter_wrapper_->AdaptFrame(frame_source_->GetFrame()); EXPECT_GE(adapter_wrapper_->GetStats().captured_frames, 10); EXPECT_EQ(3, adapter_wrapper_->GetStats().dropped_frames); } @@ -318,75 +248,6 @@ TEST_P(VideoAdapterTest, AdaptFramerateToHalfWithNoPixelLimit) { } } -// After the first timestamp, add a big offset to the timestamps. Expect that -// the adapter is conservative and resets to the new offset and does not drop -// any frame. -TEST_P(VideoAdapterTest, AdaptFramerateTimestampOffset) { - const int64_t capture_interval = VideoFormat::FpsToInterval(kDefaultFps); - OnOutputFormatRequest(640, 480, kDefaultFps); - - const int64_t first_timestamp = 0; - adapter_.AdaptFrameResolution(640, 480, first_timestamp, &cropped_width_, - &cropped_height_, &out_width_, &out_height_); - EXPECT_GT(out_width_, 0); - EXPECT_GT(out_height_, 0); - - const int64_t big_offset = -987654321LL * 1000; - const int64_t second_timestamp = big_offset; - adapter_.AdaptFrameResolution(640, 480, second_timestamp, &cropped_width_, - &cropped_height_, &out_width_, &out_height_); - EXPECT_GT(out_width_, 0); - EXPECT_GT(out_height_, 0); - - const int64_t third_timestamp = big_offset + capture_interval; - adapter_.AdaptFrameResolution(640, 480, third_timestamp, &cropped_width_, - &cropped_height_, &out_width_, &out_height_); - EXPECT_GT(out_width_, 0); - EXPECT_GT(out_height_, 0); -} - -// Request 30 fps and send 30 fps with jitter. Expect that no frame is dropped. 
-TEST_P(VideoAdapterTest, AdaptFramerateTimestampJitter) { - const int64_t capture_interval = VideoFormat::FpsToInterval(kDefaultFps); - OnOutputFormatRequest(640, 480, kDefaultFps); - - adapter_.AdaptFrameResolution(640, 480, capture_interval * 0 / 10, - &cropped_width_, &cropped_height_, &out_width_, - &out_height_); - EXPECT_GT(out_width_, 0); - EXPECT_GT(out_height_, 0); - - adapter_.AdaptFrameResolution(640, 480, capture_interval * 10 / 10 - 1, - &cropped_width_, &cropped_height_, &out_width_, - &out_height_); - EXPECT_GT(out_width_, 0); - EXPECT_GT(out_height_, 0); - - adapter_.AdaptFrameResolution(640, 480, capture_interval * 25 / 10, - &cropped_width_, &cropped_height_, &out_width_, - &out_height_); - EXPECT_GT(out_width_, 0); - EXPECT_GT(out_height_, 0); - - adapter_.AdaptFrameResolution(640, 480, capture_interval * 30 / 10, - &cropped_width_, &cropped_height_, &out_width_, - &out_height_); - EXPECT_GT(out_width_, 0); - EXPECT_GT(out_height_, 0); - - adapter_.AdaptFrameResolution(640, 480, capture_interval * 35 / 10, - &cropped_width_, &cropped_height_, &out_width_, - &out_height_); - EXPECT_GT(out_width_, 0); - EXPECT_GT(out_height_, 0); - - adapter_.AdaptFrameResolution(640, 480, capture_interval * 50 / 10, - &cropped_width_, &cropped_height_, &out_width_, - &out_height_); - EXPECT_GT(out_width_, 0); - EXPECT_GT(out_height_, 0); -} - // Adapt the frame rate to be half of the capture rate after capturing no less // than 10 frames. Expect no frame dropped before adaptation and frame dropped // after adaptation. diff --git a/media/base/video_broadcaster.cc b/media/base/video_broadcaster.cc index 3c20eca963..1167d7fb32 100644 --- a/media/base/video_broadcaster.cc +++ b/media/base/video_broadcaster.cc @@ -30,8 +30,15 @@ void VideoBroadcaster::AddOrUpdateSink( RTC_DCHECK(sink != nullptr); webrtc::MutexLock lock(&sinks_and_wants_lock_); if (!FindSinkPair(sink)) { - // |Sink| is a new sink, which didn't receive previous frame. + // `Sink` is a new sink, which didn't receive previous frame. 
previous_frame_sent_to_all_sinks_ = false; + + if (last_constraints_.has_value()) { + RTC_LOG(LS_INFO) << __func__ << " forwarding stored constraints min_fps " + << last_constraints_->min_fps.value_or(-1) << " max_fps " + << last_constraints_->max_fps.value_or(-1); + sink->OnConstraintsChanged(*last_constraints_); + } } VideoSourceBase::AddOrUpdateSink(sink, wants); UpdateWants(); @@ -100,6 +107,18 @@ void VideoBroadcaster::OnDiscardedFrame() { } } +void VideoBroadcaster::ProcessConstraints( + const webrtc::VideoTrackSourceConstraints& constraints) { + webrtc::MutexLock lock(&sinks_and_wants_lock_); + RTC_LOG(LS_INFO) << __func__ << " min_fps " + << constraints.min_fps.value_or(-1) << " max_fps " + << constraints.max_fps.value_or(-1) << " broadcasting to " + << sink_pairs().size() << " sinks."; + last_constraints_ = constraints; + for (auto& sink_pair : sink_pairs()) + sink_pair.sink->OnConstraintsChanged(constraints); +} + void VideoBroadcaster::UpdateWants() { VideoSinkWants wants; wants.rotation_applied = false; diff --git a/media/base/video_broadcaster.h b/media/base/video_broadcaster.h index 2f4e578224..c253d44b09 100644 --- a/media/base/video_broadcaster.h +++ b/media/base/video_broadcaster.h @@ -11,6 +11,7 @@ #ifndef MEDIA_BASE_VIDEO_BROADCASTER_H_ #define MEDIA_BASE_VIDEO_BROADCASTER_H_ +#include "api/media_stream_interface.h" #include "api/scoped_refptr.h" #include "api/sequence_checker.h" #include "api/video/video_frame_buffer.h" @@ -31,6 +32,11 @@ class VideoBroadcaster : public VideoSourceBase, public: VideoBroadcaster(); ~VideoBroadcaster() override; + + // Adds a new sink, or updates an already existing one. If the sink is new and + // ProcessConstraints has been called previously, the new sink's + // OnConstraintsChanged method will be invoked with the most recent + // constraints. void AddOrUpdateSink(VideoSinkInterface<webrtc::VideoFrame>* sink, const VideoSinkWants& wants) override; void RemoveSink(VideoSinkInterface<webrtc::VideoFrame>* sink) override; @@ -50,6 +56,11 @@ class VideoBroadcaster : public VideoSourceBase, void OnDiscardedFrame() override; + // Called on the network thread when constraints change. Forwards the + // constraints to sinks added with AddOrUpdateSink via OnConstraintsChanged.
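+ // A minimal usage sketch (values hypothetical): a source could call
+ //   broadcaster.ProcessConstraints(
+ //       webrtc::VideoTrackSourceConstraints{absl::nullopt, 30});
+ // after which any newly added sink immediately receives
+ // OnConstraintsChanged with max_fps 30.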
+ void ProcessConstraints( + const webrtc::VideoTrackSourceConstraints& constraints); + protected: void UpdateWants() RTC_EXCLUSIVE_LOCKS_REQUIRED(sinks_and_wants_lock_); const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& GetBlackFrameBuffer( @@ -62,6 +73,8 @@ class VideoBroadcaster : public VideoSourceBase, rtc::scoped_refptr<webrtc::VideoFrameBuffer> black_frame_buffer_; bool previous_frame_sent_to_all_sinks_ RTC_GUARDED_BY(sinks_and_wants_lock_) = true; + absl::optional<webrtc::VideoTrackSourceConstraints> last_constraints_ + RTC_GUARDED_BY(sinks_and_wants_lock_); }; } // namespace rtc diff --git a/media/base/video_broadcaster_unittest.cc b/media/base/video_broadcaster_unittest.cc index b007278547..b9672375a7 100644 --- a/media/base/video_broadcaster_unittest.cc +++ b/media/base/video_broadcaster_unittest.cc @@ -16,13 +16,31 @@ #include "api/video/i420_buffer.h" #include "api/video/video_frame.h" #include "api/video/video_rotation.h" +#include "api/video/video_source_interface.h" #include "media/base/fake_video_renderer.h" +#include "test/gmock.h" #include "test/gtest.h" using cricket::FakeVideoRenderer; using rtc::VideoBroadcaster; using rtc::VideoSinkWants; +using ::testing::AllOf; +using ::testing::Eq; +using ::testing::Field; +using ::testing::Mock; +using ::testing::Optional; + +class MockSink : public rtc::VideoSinkInterface<webrtc::VideoFrame> { + public: + void OnFrame(const webrtc::VideoFrame&) override {} + + MOCK_METHOD(void, + OnConstraintsChanged, + (const webrtc::VideoTrackSourceConstraints& constraints), + (override)); +}; + TEST(VideoBroadcasterTest, frame_wanted) { VideoBroadcaster broadcaster; EXPECT_FALSE(broadcaster.frame_wanted()); @@ -232,3 +250,83 @@ TEST(VideoBroadcasterTest, SinkWantsBlackFrames) { EXPECT_TRUE(sink2.black_frame()); EXPECT_EQ(30, sink2.timestamp_us()); } + +TEST(VideoBroadcasterTest, ConstraintsChangedNotCalledOnSinkAddition) { + MockSink sink; + VideoBroadcaster broadcaster; + EXPECT_CALL(sink, OnConstraintsChanged).Times(0); + broadcaster.AddOrUpdateSink(&sink, VideoSinkWants()); +} + +TEST(VideoBroadcasterTest, ForwardsLastConstraintsOnAdd) { + MockSink sink; + VideoBroadcaster broadcaster; + broadcaster.ProcessConstraints(webrtc::VideoTrackSourceConstraints{2, 3}); + broadcaster.ProcessConstraints(webrtc::VideoTrackSourceConstraints{1, 4}); + EXPECT_CALL( + sink, + OnConstraintsChanged(AllOf( + Field(&webrtc::VideoTrackSourceConstraints::min_fps, Optional(1)), + Field(&webrtc::VideoTrackSourceConstraints::max_fps, Optional(4))))); + broadcaster.AddOrUpdateSink(&sink, VideoSinkWants()); +} + +TEST(VideoBroadcasterTest, UpdatesOnlyNewSinksWithConstraints) { + MockSink sink1; + VideoBroadcaster broadcaster; + broadcaster.AddOrUpdateSink(&sink1, VideoSinkWants()); + broadcaster.ProcessConstraints(webrtc::VideoTrackSourceConstraints{1, 4}); + Mock::VerifyAndClearExpectations(&sink1); + EXPECT_CALL(sink1, OnConstraintsChanged).Times(0); + MockSink sink2; + EXPECT_CALL( + sink2, + OnConstraintsChanged(AllOf( + Field(&webrtc::VideoTrackSourceConstraints::min_fps, Optional(1)), + Field(&webrtc::VideoTrackSourceConstraints::max_fps, Optional(4))))); + broadcaster.AddOrUpdateSink(&sink2, VideoSinkWants()); +} + +TEST(VideoBroadcasterTest, ForwardsConstraintsToSink) { + MockSink sink; + VideoBroadcaster broadcaster; + EXPECT_CALL(sink, OnConstraintsChanged).Times(0); + broadcaster.AddOrUpdateSink(&sink, VideoSinkWants()); + Mock::VerifyAndClearExpectations(&sink); + + EXPECT_CALL(sink, OnConstraintsChanged(AllOf( + Field(&webrtc::VideoTrackSourceConstraints::min_fps, + Eq(absl::nullopt)), + Field(&webrtc::VideoTrackSourceConstraints::max_fps,
Eq(absl::nullopt))))); + broadcaster.ProcessConstraints( + webrtc::VideoTrackSourceConstraints{absl::nullopt, absl::nullopt}); + Mock::VerifyAndClearExpectations(&sink); + + EXPECT_CALL( + sink, + OnConstraintsChanged(AllOf( + Field(&webrtc::VideoTrackSourceConstraints::min_fps, + Eq(absl::nullopt)), + Field(&webrtc::VideoTrackSourceConstraints::max_fps, Optional(3))))); + broadcaster.ProcessConstraints( + webrtc::VideoTrackSourceConstraints{absl::nullopt, 3}); + Mock::VerifyAndClearExpectations(&sink); + + EXPECT_CALL( + sink, + OnConstraintsChanged(AllOf( + Field(&webrtc::VideoTrackSourceConstraints::min_fps, Optional(2)), + Field(&webrtc::VideoTrackSourceConstraints::max_fps, + Eq(absl::nullopt))))); + broadcaster.ProcessConstraints( + webrtc::VideoTrackSourceConstraints{2, absl::nullopt}); + Mock::VerifyAndClearExpectations(&sink); + + EXPECT_CALL( + sink, + OnConstraintsChanged(AllOf( + Field(&webrtc::VideoTrackSourceConstraints::min_fps, Optional(2)), + Field(&webrtc::VideoTrackSourceConstraints::max_fps, Optional(3))))); + broadcaster.ProcessConstraints(webrtc::VideoTrackSourceConstraints{2, 3}); +} diff --git a/media/base/video_common.h b/media/base/video_common.h index e7ad22f9ae..f27e008d26 100644 --- a/media/base/video_common.h +++ b/media/base/video_common.h @@ -213,10 +213,10 @@ struct RTC_EXPORT VideoFormat : VideoFormatPod { std::string ToString() const; }; -// Returns the largest positive integer that divides both |a| and |b|. +// Returns the largest positive integer that divides both `a` and `b`. int GreatestCommonDivisor(int a, int b); -// Returns the smallest positive integer that is divisible by both |a| and |b|. +// Returns the smallest positive integer that is divisible by both `a` and `b`. int LeastCommonMultiple(int a, int b); } // namespace cricket diff --git a/media/base/vp9_profile.h b/media/base/vp9_profile.h deleted file mode 100644 index d44a7998d2..0000000000 --- a/media/base/vp9_profile.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef MEDIA_BASE_VP9_PROFILE_H_ -#define MEDIA_BASE_VP9_PROFILE_H_ - -#include "api/video_codecs/vp9_profile.h" - -// TODO(crbug.com/1187565): Remove this file once downstream projects stop -// depend on it. 
- -#endif // MEDIA_BASE_VP9_PROFILE_H_ diff --git a/media/engine/encoder_simulcast_proxy.cc b/media/engine/encoder_simulcast_proxy.cc index 7a6638f56f..f906847efe 100644 --- a/media/engine/encoder_simulcast_proxy.cc +++ b/media/engine/encoder_simulcast_proxy.cc @@ -22,10 +22,7 @@ EncoderSimulcastProxy::EncoderSimulcastProxy(VideoEncoderFactory* factory, encoder_ = factory_->CreateVideoEncoder(format); } -EncoderSimulcastProxy::EncoderSimulcastProxy(VideoEncoderFactory* factory) - : EncoderSimulcastProxy(factory, SdpVideoFormat("VP8")) {} - -EncoderSimulcastProxy::~EncoderSimulcastProxy() {} +EncoderSimulcastProxy::~EncoderSimulcastProxy() = default; int EncoderSimulcastProxy::Release() { return encoder_->Release(); diff --git a/media/engine/encoder_simulcast_proxy.h b/media/engine/encoder_simulcast_proxy.h index 8e9e0ffb6e..a8c28add64 100644 --- a/media/engine/encoder_simulcast_proxy.h +++ b/media/engine/encoder_simulcast_proxy.h @@ -35,10 +35,6 @@ class RTC_EXPORT EncoderSimulcastProxy : public VideoEncoder { public: EncoderSimulcastProxy(VideoEncoderFactory* factory, const SdpVideoFormat& format); - // Deprecated. Remove once all clients use constructor with both factory and - // SdpVideoFormat; - explicit EncoderSimulcastProxy(VideoEncoderFactory* factory); - ~EncoderSimulcastProxy() override; // Implements VideoEncoder. diff --git a/media/engine/encoder_simulcast_proxy_unittest.cc b/media/engine/encoder_simulcast_proxy_unittest.cc index e5eb7a3703..fbd9d554bd 100644 --- a/media/engine/encoder_simulcast_proxy_unittest.cc +++ b/media/engine/encoder_simulcast_proxy_unittest.cc @@ -184,35 +184,5 @@ TEST(EncoderSimulcastProxy, ForwardsHardwareAccelerated) { EXPECT_TRUE(simulcast_enabled_proxy.GetEncoderInfo().is_hardware_accelerated); } -TEST(EncoderSimulcastProxy, ForwardsInternalSource) { - auto mock_encoder_owned = std::make_unique>(); - NiceMock* mock_encoder = mock_encoder_owned.get(); - NiceMock simulcast_factory; - - EXPECT_CALL(*mock_encoder, InitEncode(_, _)) - .WillOnce(Return(WEBRTC_VIDEO_CODEC_OK)); - - EXPECT_CALL(simulcast_factory, CreateVideoEncoder) - .Times(1) - .WillOnce(Return(ByMove(std::move(mock_encoder_owned)))); - - EncoderSimulcastProxy simulcast_enabled_proxy(&simulcast_factory, - SdpVideoFormat("VP8")); - VideoCodec codec_settings; - webrtc::test::CodecSettings(kVideoCodecVP8, &codec_settings); - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, - simulcast_enabled_proxy.InitEncode(&codec_settings, kSettings)); - - VideoEncoder::EncoderInfo info; - - info.has_internal_source = false; - EXPECT_CALL(*mock_encoder, GetEncoderInfo()).WillOnce(Return(info)); - EXPECT_FALSE(simulcast_enabled_proxy.GetEncoderInfo().has_internal_source); - - info.has_internal_source = true; - EXPECT_CALL(*mock_encoder, GetEncoderInfo()).WillOnce(Return(info)); - EXPECT_TRUE(simulcast_enabled_proxy.GetEncoderInfo().has_internal_source); -} - } // namespace testing } // namespace webrtc diff --git a/media/engine/fake_webrtc_call.cc b/media/engine/fake_webrtc_call.cc index e8c7f6e0c9..7b9174d961 100644 --- a/media/engine/fake_webrtc_call.cc +++ b/media/engine/fake_webrtc_call.cc @@ -14,12 +14,15 @@ #include "absl/algorithm/container.h" #include "api/call/audio_sink.h" -#include "media/base/rtp_utils.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/checks.h" #include "rtc_base/gunit.h" #include "rtc_base/thread.h" namespace cricket { + +using ::webrtc::ParseRtpSsrc; + FakeAudioSendStream::FakeAudioSendStream( int id, const webrtc::AudioSendStream::Config& config) @@ -113,6 +116,10 @@ void 
FakeAudioReceiveStream::SetUseTransportCcAndNackHistory( config_.rtp.nack.rtp_history_ms = history_ms; } +void FakeAudioReceiveStream::SetNonSenderRttMeasurement(bool enabled) { + config_.enable_non_sender_rtt = enabled; +} + void FakeAudioReceiveStream::SetFrameDecryptor( rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) { config_.frame_decryptor = std::move(frame_decryptor); @@ -368,6 +375,11 @@ webrtc::VideoReceiveStream::Stats FakeVideoReceiveStream::GetStats() const { return stats_; } +void FakeVideoReceiveStream::SetRtpExtensions( + std::vector<webrtc::RtpExtension> extensions) { + config_.rtp.extensions = std::move(extensions); +} + void FakeVideoReceiveStream::Start() { receiving_ = true; } @@ -385,6 +397,11 @@ FakeFlexfecReceiveStream::FakeFlexfecReceiveStream( const webrtc::FlexfecReceiveStream::Config& config) : config_(config) {} +void FakeFlexfecReceiveStream::SetRtpExtensions( + std::vector<webrtc::RtpExtension> extensions) { + config_.rtp.extensions = std::move(extensions); +} + const webrtc::FlexfecReceiveStream::Config& FakeFlexfecReceiveStream::GetConfig() const { return config_; @@ -396,7 +413,7 @@ webrtc::FlexfecReceiveStream::Stats FakeFlexfecReceiveStream::GetStats() const { } void FakeFlexfecReceiveStream::OnRtpPacket(const webrtc::RtpPacketReceived&) { - RTC_NOTREACHED() << "Not implemented."; + RTC_DCHECK_NOTREACHED() << "Not implemented."; } FakeCall::FakeCall() @@ -601,10 +618,11 @@ FakeCall::DeliveryStatus FakeCall::DeliverPacket(webrtc::MediaType media_type, RTC_DCHECK(media_type == webrtc::MediaType::AUDIO || media_type == webrtc::MediaType::VIDEO); - uint32_t ssrc; - if (!GetRtpSsrc(packet.cdata(), packet.size(), &ssrc)) + if (!webrtc::IsRtpPacket(packet)) { return DELIVERY_PACKET_ERROR; + } + uint32_t ssrc = ParseRtpSsrc(packet); if (media_type == webrtc::MediaType::VIDEO) { for (auto receiver : video_receive_streams_) { if (receiver->GetConfig().rtp.remote_ssrc == ssrc) { diff --git a/media/engine/fake_webrtc_call.h b/media/engine/fake_webrtc_call.h index aeef95477e..b84c385b9c 100644 --- a/media/engine/fake_webrtc_call.h +++ b/media/engine/fake_webrtc_call.h @@ -122,6 +122,7 @@ class FakeAudioReceiveStream final : public webrtc::AudioReceiveStream { std::map<int, webrtc::SdpAudioFormat> decoder_map) override; void SetUseTransportCcAndNackHistory(bool use_transport_cc, int history_ms) override; + void SetNonSenderRttMeasurement(bool enabled) override; void SetFrameDecryptor(rtc::scoped_refptr<webrtc::FrameDecryptorInterface> frame_decryptor) override; void SetRtpExtensions(std::vector<webrtc::RtpExtension> extensions) override; @@ -192,8 +193,7 @@ class FakeVideoSendStream final void OnFrame(const webrtc::VideoFrame& frame) override; // webrtc::VideoSendStream implementation. - void UpdateActiveSimulcastLayers( - const std::vector<bool> active_layers) override; + void UpdateActiveSimulcastLayers(std::vector<bool> active_layers) override; void Start() override; void Stop() override; bool started() override { return IsSending(); } @@ -263,9 +263,12 @@ class FakeVideoReceiveStream final : public webrtc::VideoReceiveStream { private: // webrtc::VideoReceiveStream implementation.
+ void SetRtpExtensions(std::vector<webrtc::RtpExtension> extensions) override; + const webrtc::ReceiveStream::RtpConfig& rtp_config() const override { return config_.rtp; } + void Start() override; void Stop() override; @@ -292,6 +295,8 @@ class FakeFlexfecReceiveStream final : public webrtc::FlexfecReceiveStream { explicit FakeFlexfecReceiveStream( const webrtc::FlexfecReceiveStream::Config& config); + void SetRtpExtensions(std::vector<webrtc::RtpExtension> extensions) override; + const webrtc::ReceiveStream::RtpConfig& rtp_config() const override { return config_.rtp; } diff --git a/media/engine/fake_webrtc_video_engine.cc b/media/engine/fake_webrtc_video_engine.cc index c1fc2e6fa0..7383c26399 100644 --- a/media/engine/fake_webrtc_video_engine.cc +++ b/media/engine/fake_webrtc_video_engine.cc @@ -26,18 +26,6 @@ namespace { static const int kEventTimeoutMs = 10000; -bool IsFormatSupported( - const std::vector<webrtc::SdpVideoFormat>& supported_formats, - const webrtc::SdpVideoFormat& format) { - for (const webrtc::SdpVideoFormat& supported_format : supported_formats) { - if (IsSameCodec(format.name, format.parameters, supported_format.name, - supported_format.parameters)) { - return true; - } - } - return false; -} - } // namespace // Decoder. @@ -51,8 +39,8 @@ FakeWebRtcVideoDecoder::~FakeWebRtcVideoDecoder() { } } -int32_t FakeWebRtcVideoDecoder::InitDecode(const webrtc::VideoCodec*, int32_t) { - return WEBRTC_VIDEO_CODEC_OK; +bool FakeWebRtcVideoDecoder::Configure(const Settings& settings) { + return true; } int32_t FakeWebRtcVideoDecoder::Decode(const webrtc::EncodedImage&, @@ -85,7 +73,7 @@ FakeWebRtcVideoDecoderFactory::GetSupportedFormats() const { for (const webrtc::SdpVideoFormat& format : supported_codec_formats_) { // Don't add same codec twice. - if (!IsFormatSupported(formats, format)) + if (!format.IsCodecInList(formats)) formats.push_back(format); } @@ -95,7 +83,7 @@ FakeWebRtcVideoDecoderFactory::GetSupportedFormats() const { std::unique_ptr<webrtc::VideoDecoder> FakeWebRtcVideoDecoderFactory::CreateVideoDecoder( const webrtc::SdpVideoFormat& format) { - if (IsFormatSupported(supported_codec_formats_, format)) { + if (format.IsCodecInList(supported_codec_formats_)) { num_created_decoders_++; std::unique_ptr<FakeWebRtcVideoDecoder> decoder = std::make_unique<FakeWebRtcVideoDecoder>(this); @@ -179,7 +167,6 @@ webrtc::VideoEncoder::EncoderInfo FakeWebRtcVideoEncoder::GetEncoderInfo() const { EncoderInfo info; info.is_hardware_accelerated = true; - info.has_internal_source = false; return info; } @@ -200,7 +187,6 @@ int FakeWebRtcVideoEncoder::GetNumEncodedFrames() { // Video encoder factory. FakeWebRtcVideoEncoderFactory::FakeWebRtcVideoEncoderFactory() : num_created_encoders_(0), - encoders_have_internal_sources_(false), vp8_factory_mode_(false) {} std::vector<webrtc::SdpVideoFormat> @@ -209,7 +195,7 @@ FakeWebRtcVideoEncoderFactory::GetSupportedFormats() const { for (const webrtc::SdpVideoFormat& format : formats_) { // Don't add same codec twice.
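+ // (SdpVideoFormat::IsCodecInList performs the same name-and-parameters
+ // comparison that the removed IsFormatSupported helper did via
+ // IsSameCodec.)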
- if (!IsFormatSupported(formats, format)) + if (!format.IsCodecInList(formats)) formats.push_back(format); } @@ -221,7 +207,7 @@ FakeWebRtcVideoEncoderFactory::CreateVideoEncoder( const webrtc::SdpVideoFormat& format) { webrtc::MutexLock lock(&mutex_); std::unique_ptr encoder; - if (IsFormatSupported(formats_, format)) { + if (format.IsCodecInList(formats_)) { if (absl::EqualsIgnoreCase(format.name, kVp8CodecName) && !vp8_factory_mode_) { // The simulcast adapter will ask this factory for multiple VP8 @@ -239,14 +225,6 @@ FakeWebRtcVideoEncoderFactory::CreateVideoEncoder( return encoder; } -webrtc::VideoEncoderFactory::CodecInfo -FakeWebRtcVideoEncoderFactory::QueryVideoEncoder( - const webrtc::SdpVideoFormat& format) const { - webrtc::VideoEncoderFactory::CodecInfo info; - info.has_internal_source = encoders_have_internal_sources_; - return info; -} - bool FakeWebRtcVideoEncoderFactory::WaitForCreatedVideoEncoders( int num_encoders) { int64_t start_offset_ms = rtc::TimeMillis(); @@ -266,11 +244,6 @@ void FakeWebRtcVideoEncoderFactory::EncoderDestroyed( encoders_.end()); } -void FakeWebRtcVideoEncoderFactory::set_encoders_have_internal_sources( - bool internal_source) { - encoders_have_internal_sources_ = internal_source; -} - void FakeWebRtcVideoEncoderFactory::AddSupportedVideoCodec( const webrtc::SdpVideoFormat& format) { formats_.push_back(format); diff --git a/media/engine/fake_webrtc_video_engine.h b/media/engine/fake_webrtc_video_engine.h index 9adb5a41ef..37ef53add7 100644 --- a/media/engine/fake_webrtc_video_engine.h +++ b/media/engine/fake_webrtc_video_engine.h @@ -44,7 +44,7 @@ class FakeWebRtcVideoDecoder : public webrtc::VideoDecoder { explicit FakeWebRtcVideoDecoder(FakeWebRtcVideoDecoderFactory* factory); ~FakeWebRtcVideoDecoder(); - int32_t InitDecode(const webrtc::VideoCodec*, int32_t) override; + bool Configure(const Settings& settings) override; int32_t Decode(const webrtc::EncodedImage&, bool, int64_t) override; int32_t RegisterDecodeCompleteCallback( webrtc::DecodedImageCallback*) override; @@ -116,8 +116,6 @@ class FakeWebRtcVideoEncoderFactory : public webrtc::VideoEncoderFactory { std::vector GetSupportedFormats() const override; std::unique_ptr CreateVideoEncoder( const webrtc::SdpVideoFormat& format) override; - CodecInfo QueryVideoEncoder( - const webrtc::SdpVideoFormat& format) const override; bool WaitForCreatedVideoEncoders(int num_encoders); void EncoderDestroyed(FakeWebRtcVideoEncoder* encoder); @@ -133,7 +131,6 @@ class FakeWebRtcVideoEncoderFactory : public webrtc::VideoEncoderFactory { std::vector formats_; std::vector encoders_ RTC_GUARDED_BY(mutex_); int num_created_encoders_ RTC_GUARDED_BY(mutex_); - bool encoders_have_internal_sources_; bool vp8_factory_mode_; }; diff --git a/media/engine/internal_decoder_factory.cc b/media/engine/internal_decoder_factory.cc index a8d1f00009..c24c488546 100644 --- a/media/engine/internal_decoder_factory.cc +++ b/media/engine/internal_decoder_factory.cc @@ -12,6 +12,7 @@ #include "absl/strings/match.h" #include "api/video_codecs/sdp_video_format.h" +#include "api/video_codecs/video_codec.h" #include "media/base/codec.h" #include "media/base/media_constants.h" #include "modules/video_coding/codecs/av1/libaom_av1_decoder.h" @@ -20,8 +21,25 @@ #include "modules/video_coding/codecs/vp9/include/vp9.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" +#include "system_wrappers/include/field_trial.h" + +#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY) +#include "modules/video_coding/codecs/av1/dav1d_decoder.h" // 
nogncheck +#endif namespace webrtc { +namespace { +constexpr char kDav1dFieldTrial[] = "WebRTC-Dav1dDecoder"; +#if defined(RTC_DAV1D_IN_INTERNAL_DECODER_FACTORY) +constexpr bool kDav1dIsIncluded = true; +#else +constexpr bool kDav1dIsIncluded = false; +std::unique_ptr CreateDav1dDecoder() { + return nullptr; +} +#endif + +} // namespace std::vector InternalDecoderFactory::GetSupportedFormats() const { @@ -29,13 +47,35 @@ std::vector InternalDecoderFactory::GetSupportedFormats() formats.push_back(SdpVideoFormat(cricket::kVp8CodecName)); for (const SdpVideoFormat& format : SupportedVP9DecoderCodecs()) formats.push_back(format); - for (const SdpVideoFormat& h264_format : SupportedH264Codecs()) + for (const SdpVideoFormat& h264_format : SupportedH264DecoderCodecs()) formats.push_back(h264_format); - if (kIsLibaomAv1DecoderSupported) + + if (kIsLibaomAv1DecoderSupported || + (kDav1dIsIncluded && field_trial::IsEnabled(kDav1dFieldTrial))) { formats.push_back(SdpVideoFormat(cricket::kAv1CodecName)); + } + return formats; } +VideoDecoderFactory::CodecSupport InternalDecoderFactory::QueryCodecSupport( + const SdpVideoFormat& format, + bool reference_scaling) const { + // Query for supported formats and check if the specified format is supported. + // Return unsupported if an invalid combination of format and + // reference_scaling is specified. + if (reference_scaling) { + VideoCodecType codec = PayloadStringToCodecType(format.name); + if (codec != kVideoCodecVP9 && codec != kVideoCodecAV1) { + return {/*is_supported=*/false, /*is_power_efficient=*/false}; + } + } + + CodecSupport codec_support; + codec_support.is_supported = format.IsCodecInList(GetSupportedFormats()); + return codec_support; +} + std::unique_ptr InternalDecoderFactory::CreateVideoDecoder( const SdpVideoFormat& format) { if (!format.IsCodecInList(GetSupportedFormats())) { @@ -50,11 +90,18 @@ std::unique_ptr InternalDecoderFactory::CreateVideoDecoder( return VP9Decoder::Create(); if (absl::EqualsIgnoreCase(format.name, cricket::kH264CodecName)) return H264Decoder::Create(); - if (kIsLibaomAv1DecoderSupported && - absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName)) + + if (absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName) && + kDav1dIsIncluded && field_trial::IsEnabled(kDav1dFieldTrial)) { + return CreateDav1dDecoder(); + } + + if (absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName) && + kIsLibaomAv1DecoderSupported) { return CreateLibaomAv1Decoder(); + } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return nullptr; } diff --git a/media/engine/internal_decoder_factory.h b/media/engine/internal_decoder_factory.h index 2a580dea0b..0129fb2173 100644 --- a/media/engine/internal_decoder_factory.h +++ b/media/engine/internal_decoder_factory.h @@ -24,6 +24,8 @@ namespace webrtc { class RTC_EXPORT InternalDecoderFactory : public VideoDecoderFactory { public: std::vector GetSupportedFormats() const override; + CodecSupport QueryCodecSupport(const SdpVideoFormat& format, + bool reference_scaling) const override; std::unique_ptr CreateVideoDecoder( const SdpVideoFormat& format) override; }; diff --git a/media/engine/internal_decoder_factory_unittest.cc b/media/engine/internal_decoder_factory_unittest.cc index a2a69211b9..d70390f934 100644 --- a/media/engine/internal_decoder_factory_unittest.cc +++ b/media/engine/internal_decoder_factory_unittest.cc @@ -19,48 +19,116 @@ #include "test/gtest.h" namespace webrtc { - +namespace { using ::testing::Contains; using ::testing::Field; using ::testing::Not; 
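+ // The QueryCodecSupport tests below double as usage documentation: callers
+ // can probe decode support before negotiation, e.g. (a sketch, variable
+ // name hypothetical):
+ //   InternalDecoderFactory factory;
+ //   bool vp9_svc_supported =
+ //       factory
+ //           .QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName),
+ //                              /*reference_scaling=*/true)
+ //           .is_supported;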
-TEST(InternalDecoderFactory, TestVP8) { +#ifdef RTC_ENABLE_VP9 +constexpr bool kVp9Enabled = true; +#else +constexpr bool kVp9Enabled = false; +#endif +#ifdef WEBRTC_USE_H264 +constexpr bool kH264Enabled = true; +#else +constexpr bool kH264Enabled = false; +#endif +constexpr VideoDecoderFactory::CodecSupport kSupported = { + /*is_supported=*/true, /*is_power_efficient=*/false}; +constexpr VideoDecoderFactory::CodecSupport kUnsupported = { + /*is_supported=*/false, /*is_power_efficient=*/false}; + +MATCHER_P(Support, expected, "") { + return arg.is_supported == expected.is_supported && + arg.is_power_efficient == expected.is_power_efficient; +} + +TEST(InternalDecoderFactoryTest, Vp8) { InternalDecoderFactory factory; std::unique_ptr<VideoDecoder> decoder = factory.CreateVideoDecoder(SdpVideoFormat(cricket::kVp8CodecName)); EXPECT_TRUE(decoder); } -#ifdef RTC_ENABLE_VP9 -TEST(InternalDecoderFactory, TestVP9Profile0) { +TEST(InternalDecoderFactoryTest, Vp9Profile0) { InternalDecoderFactory factory; std::unique_ptr<VideoDecoder> decoder = factory.CreateVideoDecoder(SdpVideoFormat( cricket::kVp9CodecName, {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}})); - EXPECT_TRUE(decoder); + EXPECT_EQ(static_cast<bool>(decoder), kVp9Enabled); } -TEST(InternalDecoderFactory, TestVP9Profile1) { +TEST(InternalDecoderFactoryTest, Vp9Profile1) { InternalDecoderFactory factory; std::unique_ptr<VideoDecoder> decoder = factory.CreateVideoDecoder(SdpVideoFormat( cricket::kVp9CodecName, {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile1)}})); - EXPECT_TRUE(decoder); + EXPECT_EQ(static_cast<bool>(decoder), kVp9Enabled); +} + +TEST(InternalDecoderFactoryTest, H264) { + InternalDecoderFactory factory; + std::unique_ptr<VideoDecoder> decoder = + factory.CreateVideoDecoder(SdpVideoFormat(cricket::kH264CodecName)); + EXPECT_EQ(static_cast<bool>(decoder), kH264Enabled); } -#endif // RTC_ENABLE_VP9 -TEST(InternalDecoderFactory, Av1) { +TEST(InternalDecoderFactoryTest, Av1) { InternalDecoderFactory factory; if (kIsLibaomAv1DecoderSupported) { EXPECT_THAT(factory.GetSupportedFormats(), - Contains(Field(&SdpVideoFormat::name, "AV1X"))); - EXPECT_TRUE(factory.CreateVideoDecoder(SdpVideoFormat("AV1X"))); + Contains(Field(&SdpVideoFormat::name, cricket::kAv1CodecName))); + EXPECT_TRUE( + factory.CreateVideoDecoder(SdpVideoFormat(cricket::kAv1CodecName))); } else { - EXPECT_THAT(factory.GetSupportedFormats(), - Not(Contains(Field(&SdpVideoFormat::name, "AV1X")))); + EXPECT_THAT( + factory.GetSupportedFormats(), + Not(Contains(Field(&SdpVideoFormat::name, cricket::kAv1CodecName)))); } } +TEST(InternalDecoderFactoryTest, QueryCodecSupportNoReferenceScaling) { + InternalDecoderFactory factory; + EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName), + /*reference_scaling=*/false), + Support(kSupported)); + EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName), + /*reference_scaling=*/false), + Support(kVp9Enabled ? kSupported : kUnsupported)); + EXPECT_THAT(factory.QueryCodecSupport( + SdpVideoFormat(cricket::kVp9CodecName, + {{kVP9FmtpProfileId, + VP9ProfileToString(VP9Profile::kProfile1)}}), + /*reference_scaling=*/false), + Support(kVp9Enabled ? kSupported : kUnsupported)); + EXPECT_THAT( + factory.QueryCodecSupport(SdpVideoFormat(cricket::kAv1CodecName), + /*reference_scaling=*/false), + Support(kIsLibaomAv1DecoderSupported ? kSupported : kUnsupported)); +} + +TEST(InternalDecoderFactoryTest, QueryCodecSupportReferenceScaling) { + InternalDecoderFactory factory; + // VP9 and AV1 support for spatial layers.
+ EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName), + /*reference_scaling=*/true), + Support(kVp9Enabled ? kSupported : kUnsupported)); + EXPECT_THAT( + factory.QueryCodecSupport(SdpVideoFormat(cricket::kAv1CodecName), + /*reference_scaling=*/true), + Support(kIsLibaomAv1DecoderSupported ? kSupported : kUnsupported)); + + // Invalid config even though VP8 and H264 are supported. + EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kH264CodecName), + /*reference_scaling=*/true), + Support(kUnsupported)); + EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName), + /*reference_scaling=*/true), + Support(kUnsupported)); +} + +} // namespace } // namespace webrtc diff --git a/media/engine/internal_encoder_factory.cc b/media/engine/internal_encoder_factory.cc index 738516eafc..5ccb93d0c1 100644 --- a/media/engine/internal_encoder_factory.cc +++ b/media/engine/internal_encoder_factory.cc @@ -16,7 +16,7 @@ #include "api/video_codecs/sdp_video_format.h" #include "media/base/codec.h" #include "media/base/media_constants.h" -#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h" +#include "modules/video_coding/codecs/av1/libaom_av1_encoder_supported.h" #include "modules/video_coding/codecs/h264/include/h264.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" #include "modules/video_coding/codecs/vp9/include/vp9.h" @@ -51,10 +51,44 @@ std::unique_ptr InternalEncoderFactory::CreateVideoEncoder( return H264Encoder::Create(cricket::VideoCodec(format)); if (kIsLibaomAv1EncoderSupported && absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName)) - return CreateLibaomAv1Encoder(); + return CreateLibaomAv1EncoderIfSupported(); RTC_LOG(LS_ERROR) << "Trying to created encoder of unsupported format " << format.name; return nullptr; } +VideoEncoderFactory::CodecSupport InternalEncoderFactory::QueryCodecSupport( + const SdpVideoFormat& format, + absl::optional scalability_mode) const { + // Query for supported formats and check if the specified format is supported. + // Begin with filtering out unsupported scalability modes. 
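+ // Scalability mode strings follow an LxTy pattern: e.g. "L1T3" is one
+ // spatial layer with three temporal layers, "L3T3" is three of each; per
+ // the checks below, only VP9 and AV1 may accept multiple spatial layers.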
+ if (scalability_mode) { + bool scalability_mode_supported = false; + if (absl::EqualsIgnoreCase(format.name, cricket::kVp8CodecName)) { + scalability_mode_supported = + VP8Encoder::SupportsScalabilityMode(*scalability_mode); + } else if (absl::EqualsIgnoreCase(format.name, cricket::kVp9CodecName)) { + scalability_mode_supported = + VP9Encoder::SupportsScalabilityMode(*scalability_mode); + } else if (absl::EqualsIgnoreCase(format.name, cricket::kH264CodecName)) { + scalability_mode_supported = + H264Encoder::SupportsScalabilityMode(*scalability_mode); + } else if (kIsLibaomAv1EncoderSupported && + absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName)) { + scalability_mode_supported = + LibaomAv1EncoderSupportsScalabilityMode(*scalability_mode); + } + + static constexpr VideoEncoderFactory::CodecSupport kUnsupported = { + /*is_supported=*/false, /*is_power_efficient=*/false}; + if (!scalability_mode_supported) { + return kUnsupported; + } + } + + CodecSupport codec_support; + codec_support.is_supported = format.IsCodecInList(GetSupportedFormats()); + return codec_support; +} + } // namespace webrtc diff --git a/media/engine/internal_encoder_factory.h b/media/engine/internal_encoder_factory.h index 3f43e461a7..e12810cd30 100644 --- a/media/engine/internal_encoder_factory.h +++ b/media/engine/internal_encoder_factory.h @@ -12,8 +12,10 @@ #define MEDIA_ENGINE_INTERNAL_ENCODER_FACTORY_H_ #include +#include #include +#include "absl/types/optional.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_encoder.h" #include "api/video_codecs/video_encoder_factory.h" @@ -25,7 +27,9 @@ class RTC_EXPORT InternalEncoderFactory : public VideoEncoderFactory { public: static std::vector SupportedFormats(); std::vector GetSupportedFormats() const override; - + CodecSupport QueryCodecSupport( + const SdpVideoFormat& format, + absl::optional scalability_mode) const override; std::unique_ptr CreateVideoEncoder( const SdpVideoFormat& format) override; }; diff --git a/media/engine/internal_encoder_factory_unittest.cc b/media/engine/internal_encoder_factory_unittest.cc new file mode 100644 index 0000000000..c3a667e733 --- /dev/null +++ b/media/engine/internal_encoder_factory_unittest.cc @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "media/engine/internal_encoder_factory.h" + +#include "api/video_codecs/sdp_video_format.h" +#include "api/video_codecs/video_encoder.h" +#include "api/video_codecs/vp9_profile.h" +#include "media/base/media_constants.h" +#include "modules/video_coding/codecs/av1/libaom_av1_encoder_supported.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { +using ::testing::Contains; +using ::testing::Field; +using ::testing::Not; + +#ifdef RTC_ENABLE_VP9 +constexpr bool kVp9Enabled = true; +#else +constexpr bool kVp9Enabled = false; +#endif +#ifdef WEBRTC_USE_H264 +constexpr bool kH264Enabled = true; +#else +constexpr bool kH264Enabled = false; +#endif +constexpr VideoEncoderFactory::CodecSupport kSupported = { + /*is_supported=*/true, /*is_power_efficient=*/false}; +constexpr VideoEncoderFactory::CodecSupport kUnsupported = { + /*is_supported=*/false, /*is_power_efficient=*/false}; + +MATCHER_P(Support, expected, "") { + return arg.is_supported == expected.is_supported && + arg.is_power_efficient == expected.is_power_efficient; +} + +TEST(InternalEncoderFactoryTest, Vp8) { + InternalEncoderFactory factory; + std::unique_ptr encoder = + factory.CreateVideoEncoder(SdpVideoFormat(cricket::kVp8CodecName)); + EXPECT_TRUE(encoder); +} + +TEST(InternalEncoderFactoryTest, Vp9Profile0) { + InternalEncoderFactory factory; + if (kVp9Enabled) { + std::unique_ptr encoder = + factory.CreateVideoEncoder(SdpVideoFormat( + cricket::kVp9CodecName, + {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile0)}})); + EXPECT_TRUE(encoder); + } else { + EXPECT_THAT( + factory.GetSupportedFormats(), + Not(Contains(Field(&SdpVideoFormat::name, cricket::kVp9CodecName)))); + } +} + +TEST(InternalEncoderFactoryTest, H264) { + InternalEncoderFactory factory; + if (kH264Enabled) { + std::unique_ptr encoder = + factory.CreateVideoEncoder(SdpVideoFormat(cricket::kH264CodecName)); + EXPECT_TRUE(encoder); + } else { + EXPECT_THAT( + factory.GetSupportedFormats(), + Not(Contains(Field(&SdpVideoFormat::name, cricket::kH264CodecName)))); + } +} + +TEST(InternalEncoderFactoryTest, Av1) { + InternalEncoderFactory factory; + if (kIsLibaomAv1EncoderSupported) { + EXPECT_THAT(factory.GetSupportedFormats(), + Contains(Field(&SdpVideoFormat::name, cricket::kAv1CodecName))); + EXPECT_TRUE( + factory.CreateVideoEncoder(SdpVideoFormat(cricket::kAv1CodecName))); + } else { + EXPECT_THAT( + factory.GetSupportedFormats(), + Not(Contains(Field(&SdpVideoFormat::name, cricket::kAv1CodecName)))); + } +} + +TEST(InternalEncoderFactoryTest, QueryCodecSupportNoScalabilityMode) { + InternalEncoderFactory factory; + EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName), + /*scalability_mode=*/absl::nullopt), + Support(kSupported)); + EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName), + /*scalability_mode=*/absl::nullopt), + Support(kVp9Enabled ? kSupported : kUnsupported)); + EXPECT_THAT( + factory.QueryCodecSupport(SdpVideoFormat(cricket::kAv1CodecName), + /*scalability_mode=*/absl::nullopt), + Support(kIsLibaomAv1EncoderSupported ? kSupported : kUnsupported)); +} + +TEST(InternalEncoderFactoryTest, QueryCodecSupportWithScalabilityMode) { + InternalEncoderFactory factory; + // VP8 and VP9 supported for singles spatial layers. + EXPECT_THAT( + factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName), "L1T2"), + Support(kSupported)); + EXPECT_THAT( + factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName), "L1T3"), + Support(kVp9Enabled ? 
kSupported : kUnsupported)); + + // VP9 support for spatial layers. + EXPECT_THAT( + factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp9CodecName), "L3T3"), + Support(kVp9Enabled ? kSupported : kUnsupported)); + + EXPECT_THAT( + factory.QueryCodecSupport(SdpVideoFormat(cricket::kAv1CodecName), "L2T1"), + Support(kIsLibaomAv1EncoderSupported ? kSupported : kUnsupported)); + + // Invalid scalability modes even though VP8 and H264 are supported. + EXPECT_THAT(factory.QueryCodecSupport(SdpVideoFormat(cricket::kH264CodecName), + "L2T2"), + Support(kUnsupported)); + EXPECT_THAT( + factory.QueryCodecSupport(SdpVideoFormat(cricket::kVp8CodecName), "L3T3"), + Support(kUnsupported)); +} + +} // namespace +} // namespace webrtc diff --git a/media/engine/multiplex_codec_factory.h b/media/engine/multiplex_codec_factory.h index ea57149a77..a4272a2eb2 100644 --- a/media/engine/multiplex_codec_factory.h +++ b/media/engine/multiplex_codec_factory.h @@ -42,7 +42,7 @@ namespace webrtc { // - Select "multiplex" codec in SDP negotiation. class RTC_EXPORT MultiplexEncoderFactory : public VideoEncoderFactory { public: - // |supports_augmenting_data| defines if the encoder would support augmenting + // `supports_augmenting_data` defines if the encoder would support augmenting // data. If set, the encoder expects to receive video frame buffers of type // AugmentedVideoFrameBuffer. MultiplexEncoderFactory(std::unique_ptr factory, @@ -59,7 +59,7 @@ class RTC_EXPORT MultiplexEncoderFactory : public VideoEncoderFactory { class RTC_EXPORT MultiplexDecoderFactory : public VideoDecoderFactory { public: - // |supports_augmenting_data| defines if the decoder would support augmenting + // `supports_augmenting_data` defines if the decoder would support augmenting // data. If set, the decoder is expected to output video frame buffers of type // AugmentedVideoFrameBuffer. MultiplexDecoderFactory(std::unique_ptr factory, diff --git a/media/engine/payload_type_mapper.cc b/media/engine/payload_type_mapper.cc index cbc0a5340d..c63d1d7221 100644 --- a/media/engine/payload_type_mapper.cc +++ b/media/engine/payload_type_mapper.cc @@ -73,7 +73,12 @@ PayloadTypeMapper::PayloadTypeMapper() {kCodecParamUseInbandFec, kParamValueTrue}}}, 111}, // RED for opus is assigned in the lower range, starting at the top. - {{kRedCodecName, 48000, 2}, 63}, + // Note that the FMTP refers to the opus payload type. + {{kRedCodecName, + 48000, + 2, + {{kCodecParamNotInNameValueFormat, "111/111"}}}, + 63}, // TODO(solenberg): Remove the hard coded 16k,32k,48k DTMF once we // assign payload types dynamically for send side as well. {{kDtmfCodecName, 48000, 1}, 110}, diff --git a/media/engine/payload_type_mapper.h b/media/engine/payload_type_mapper.h index d8ab4a4261..1d5cd7198f 100644 --- a/media/engine/payload_type_mapper.h +++ b/media/engine/payload_type_mapper.h @@ -27,12 +27,12 @@ class PayloadTypeMapper { PayloadTypeMapper(); ~PayloadTypeMapper(); - // Finds the current payload type for |format| or assigns a new one, if no + // Finds the current payload type for `format` or assigns a new one, if no // current mapping exists. Will return an empty value if it was unable to // create a mapping, i.e. if all dynamic payload type ids have been used up. absl::optional GetMappingFor(const webrtc::SdpAudioFormat& format); - // Finds the current payload type for |format|, if any. Returns an empty value + // Finds the current payload type for `format`, if any. Returns an empty value // if no payload type mapping exists for the format. 
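+ // For example, given the RED mapping above,
+ //   FindMappingFor({kRedCodecName, 48000, 2, {{"", "111/111"}}})
+ // returns 63, while a format with no mapping yields an empty value.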
absl::optional<int> FindMappingFor( const webrtc::SdpAudioFormat& format) const; diff --git a/media/engine/payload_type_mapper_unittest.cc b/media/engine/payload_type_mapper_unittest.cc index 9c29827fa9..2d9372454b 100644 --- a/media/engine/payload_type_mapper_unittest.cc +++ b/media/engine/payload_type_mapper_unittest.cc @@ -58,7 +58,8 @@ TEST_F(PayloadTypeMapperTest, WebRTCPayloadTypes) { 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}})); - EXPECT_EQ(63, mapper_.FindMappingFor({kRedCodecName, 48000, 2})); + EXPECT_EQ( + 63, mapper_.FindMappingFor({kRedCodecName, 48000, 2, {{"", "111/111"}}})); // TODO(solenberg): Remove 16k, 32k, 48k DTMF checks once these payload types // are dynamically assigned. EXPECT_EQ(110, mapper_.FindMappingFor({kDtmfCodecName, 48000, 1})); diff --git a/media/engine/simulcast.cc b/media/engine/simulcast.cc index ebc6a240fe..3909685995 100644 --- a/media/engine/simulcast.cc +++ b/media/engine/simulcast.cc @@ -71,16 +71,16 @@ struct SimulcastFormat { int width; int height; // The maximum number of simulcast layers can be used for - // resolutions at |widthxheight| for legacy applications. + // resolutions at `widthxheight` for legacy applications. size_t max_layers; - // The maximum bitrate for encoding stream at |widthxheight|, when we are + // The maximum bitrate for encoding stream at `widthxheight`, when we are // not sending the next higher spatial stream. webrtc::DataRate max_bitrate; - // The target bitrate for encoding stream at |widthxheight|, when this layer + // The target bitrate for encoding stream at `widthxheight`, when this layer // is not the highest layer (i.e., when we are sending another higher spatial // stream). webrtc::DataRate target_bitrate; - // The minimum bitrate needed for encoding stream at |widthxheight|. + // The minimum bitrate needed for encoding stream at `widthxheight`. webrtc::DataRate min_bitrate; }; @@ -175,7 +175,7 @@ int FindSimulcastFormatIndex(int width, return i; } } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return -1; } @@ -210,7 +210,7 @@ SimulcastFormat InterpolateSimulcastFormat( const float rate = (total_pixels_up - total_pixels) / static_cast<float>(total_pixels_up - total_pixels_down); - // Use upper resolution if |rate| is below the configured threshold. + // Use upper resolution if `rate` is below the configured threshold. size_t max_layers = (rate < max_roundup_rate.value_or(kDefaultMaxRoundupRate)) ? formats[index - 1].max_layers : formats[index].max_layers; @@ -296,7 +296,7 @@ size_t LimitSimulcastLayerCount(int width, "Disabled")) { // Max layers from one higher resolution in kSimulcastFormats will be used // if the ratio (pixels_up - pixels) / (pixels_up - pixels_down) is less - // than configured |max_ratio|. pixels_down is the selected index in + // than configured `max_ratio`. pixels_down is the selected index in // kSimulcastFormats based on pixels. webrtc::FieldTrialOptional<double> max_ratio("max_ratio"); webrtc::ParseFieldTrial({&max_ratio}, @@ -369,8 +369,8 @@ std::vector<webrtc::VideoStream> GetNormalSimulcastLayers( // 1|. width = NormalizeSimulcastSize(width, layer_count); height = NormalizeSimulcastSize(height, layer_count); - // Add simulcast streams, from highest resolution (|s| = num_simulcast_layers - // -1) to lowest resolution at |s| = 0. + // Add simulcast streams, from highest resolution (`s` = num_simulcast_layers + // -1) to lowest resolution at `s` = 0.
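+ // For example, with layer_count = 3 and a 1280x720 input this produces
+ // 1280x720 at s = 2, 640x360 at s = 1 and 320x180 at s = 0, since each
+ // step down halves both dimensions.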
for (size_t s = layer_count - 1;; --s) { layers[s].width = width; layers[s].height = height; diff --git a/media/engine/simulcast.h b/media/engine/simulcast.h index 5defa525dc..aa8c394816 100644 --- a/media/engine/simulcast.h +++ b/media/engine/simulcast.h @@ -21,12 +21,12 @@ namespace cricket { -// Gets the total maximum bitrate for the |streams|. +// Gets the total maximum bitrate for the `streams`. webrtc::DataRate GetTotalMaxBitrate( const std::vector& streams); -// Adds any bitrate of |max_bitrate| that is above the total maximum bitrate for -// the |layers| to the highest quality layer. +// Adds any bitrate of `max_bitrate` that is above the total maximum bitrate for +// the `layers` to the highest quality layer. void BoostMaxSimulcastLayer(webrtc::DataRate max_bitrate, std::vector* layers); diff --git a/media/engine/simulcast_encoder_adapter.cc b/media/engine/simulcast_encoder_adapter.cc index 116f987aa4..9143361e82 100644 --- a/media/engine/simulcast_encoder_adapter.cc +++ b/media/engine/simulcast_encoder_adapter.cc @@ -159,8 +159,8 @@ SimulcastEncoderAdapter::EncoderContext::EncoderContext( void SimulcastEncoderAdapter::EncoderContext::Release() { if (encoder_) { - encoder_->RegisterEncodeCompleteCallback(nullptr); encoder_->Release(); + encoder_->RegisterEncodeCompleteCallback(nullptr); } } @@ -214,7 +214,7 @@ SimulcastEncoderAdapter::StreamContext::ReleaseEncoderContext() && { void SimulcastEncoderAdapter::StreamContext::OnKeyframe(Timestamp timestamp) { is_keyframe_needed_ = false; if (framerate_controller_) { - framerate_controller_->AddFrame(timestamp.ms()); + framerate_controller_->KeepFrame(timestamp.us() * 1000); } } @@ -223,12 +223,7 @@ bool SimulcastEncoderAdapter::StreamContext::ShouldDropFrame( if (!framerate_controller_) { return false; } - - if (framerate_controller_->DropFrame(timestamp.ms())) { - return true; - } - framerate_controller_->AddFrame(timestamp.ms()); - return false; + return framerate_controller_->ShouldDropFrame(timestamp.us() * 1000); } EncodedImageCallback::Result @@ -287,7 +282,7 @@ int SimulcastEncoderAdapter::Release() { RTC_DCHECK_RUN_ON(&encoder_queue_); while (!stream_contexts_.empty()) { - // Move the encoder instances and put it on the |cached_encoder_contexts_| + // Move the encoder instances and put it on the `cached_encoder_contexts_` // where it may possibly be reused from (ordering does not matter). cached_encoder_contexts_.push_front( std::move(stream_contexts_.back()).ReleaseEncoderContext()); @@ -415,7 +410,7 @@ int SimulcastEncoderAdapter::InitEncode( } // Intercept frame encode complete callback only for upper streams, where - // we need to set a correct stream index. Set |parent| to nullptr for the + // we need to set a correct stream index. Set `parent` to nullptr for the // lowest stream to bypass the callback. SimulcastEncoderAdapter* parent = stream_idx > 0 ? this : nullptr; @@ -699,8 +694,8 @@ SimulcastEncoderAdapter::FetchOrCreateEncoderContext( is_lowest_quality_stream && prefer_temporal_support_on_base_layer_; - // Toggling of |prefer_temporal_support| requires encoder recreation. Find - // and reuse encoder with desired |prefer_temporal_support|. Otherwise, if + // Toggling of `prefer_temporal_support` requires encoder recreation. Find + // and reuse encoder with desired `prefer_temporal_support`. Otherwise, if // there is no such encoder in the cache, create a new instance. 
auto encoder_context_iter = std::find_if(cached_encoder_contexts_.begin(), @@ -714,17 +709,40 @@ SimulcastEncoderAdapter::FetchOrCreateEncoderContext( encoder_context = std::move(*encoder_context_iter); cached_encoder_contexts_.erase(encoder_context_iter); } else { - std::unique_ptr encoder = + std::unique_ptr primary_encoder = primary_encoder_factory_->CreateVideoEncoder(video_format_); - VideoEncoder::EncoderInfo primary_info = encoder->GetEncoderInfo(); - VideoEncoder::EncoderInfo fallback_info = primary_info; + + std::unique_ptr fallback_encoder; if (fallback_encoder_factory_ != nullptr) { - std::unique_ptr fallback_encoder = + fallback_encoder = fallback_encoder_factory_->CreateVideoEncoder(video_format_); + } + + std::unique_ptr encoder; + VideoEncoder::EncoderInfo primary_info; + VideoEncoder::EncoderInfo fallback_info; + + if (primary_encoder != nullptr) { + primary_info = primary_encoder->GetEncoderInfo(); + fallback_info = primary_info; + + if (fallback_encoder == nullptr) { + encoder = std::move(primary_encoder); + } else { + encoder = CreateVideoEncoderSoftwareFallbackWrapper( + std::move(fallback_encoder), std::move(primary_encoder), + prefer_temporal_support); + } + } else if (fallback_encoder != nullptr) { + RTC_LOG(LS_WARNING) << "Failed to create primary " << video_format_.name + << " encoder. Use fallback encoder."; fallback_info = fallback_encoder->GetEncoderInfo(); - encoder = CreateVideoEncoderSoftwareFallbackWrapper( - std::move(fallback_encoder), std::move(encoder), - prefer_temporal_support); + primary_info = fallback_info; + encoder = std::move(fallback_encoder); + } else { + RTC_LOG(LS_ERROR) << "Failed to create primary and fallback " + << video_format_.name << " encoders."; + return nullptr; } encoder_context = std::make_unique( @@ -769,7 +787,7 @@ webrtc::VideoCodec SimulcastEncoderAdapter::MakeStreamCodec( codec_params.VP8()->numberOfTemporalLayers = stream_params.numberOfTemporalLayers; if (!is_highest_quality_stream) { - // For resolutions below CIF, set the codec |complexity| parameter to + // For resolutions below CIF, set the codec `complexity` parameter to // kComplexityHigher, which maps to cpu_used = -4. int pixels_per_frame = codec_params.width * codec_params.height; if (pixels_per_frame < 352 * 288) { @@ -834,7 +852,10 @@ VideoEncoder::EncoderInfo SimulcastEncoderAdapter::GetEncoderInfo() const { // Create one encoder and query it. std::unique_ptr encoder_context = - FetchOrCreateEncoderContext(true); + FetchOrCreateEncoderContext(/*is_lowest_quality_stream=*/true); + if (encoder_context == nullptr) { + return encoder_info; + } const VideoEncoder::EncoderInfo& primary_info = encoder_context->PrimaryInfo(); @@ -875,7 +896,7 @@ VideoEncoder::EncoderInfo SimulcastEncoderAdapter::GetEncoderInfo() const { encoder_impl_info.has_trusted_rate_controller; encoder_info.is_hardware_accelerated = encoder_impl_info.is_hardware_accelerated; - encoder_info.has_internal_source = encoder_impl_info.has_internal_source; + encoder_info.is_qp_trusted = encoder_impl_info.is_qp_trusted; } else { encoder_info.implementation_name += ", "; encoder_info.implementation_name += encoder_impl_info.implementation_name; @@ -895,8 +916,11 @@ VideoEncoder::EncoderInfo SimulcastEncoderAdapter::GetEncoderInfo() const { encoder_info.is_hardware_accelerated |= encoder_impl_info.is_hardware_accelerated; - // Has internal source only if all encoders have it. 
- encoder_info.has_internal_source &= encoder_impl_info.has_internal_source; + // Treat QP from frame/slice/tile header as average QP only if all + // encoders report it as average QP. + encoder_info.is_qp_trusted = + encoder_info.is_qp_trusted.value_or(true) && + encoder_impl_info.is_qp_trusted.value_or(true); } encoder_info.fps_allocation[i] = encoder_impl_info.fps_allocation[0]; encoder_info.requested_resolution_alignment = cricket::LeastCommonMultiple( diff --git a/media/engine/simulcast_encoder_adapter.h b/media/engine/simulcast_encoder_adapter.h index 07e3ccd024..e6b6badbe5 100644 --- a/media/engine/simulcast_encoder_adapter.h +++ b/media/engine/simulcast_encoder_adapter.h @@ -25,8 +25,8 @@ #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_encoder.h" #include "api/video_codecs/video_encoder_factory.h" +#include "common_video/framerate_controller.h" #include "modules/video_coding/include/video_codec_interface.h" -#include "modules/video_coding/utility/framerate_controller.h" #include "rtc_base/atomic_ops.h" #include "rtc_base/experiments/encoder_info_settings.h" #include "rtc_base/system/no_unique_address.h" @@ -43,8 +43,8 @@ class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder { // TODO(bugs.webrtc.org/11000): Remove when downstream usage is gone. SimulcastEncoderAdapter(VideoEncoderFactory* primarty_factory, const SdpVideoFormat& format); - // |primary_factory| produces the first-choice encoders to use. - // |fallback_factory|, if non-null, is used to create fallback encoder that + // `primary_factory` produces the first-choice encoders to use. + // `fallback_factory`, if non-null, is used to create fallback encoder that // will be used if InitEncode() fails for the primary encoder. SimulcastEncoderAdapter(VideoEncoderFactory* primary_factory, VideoEncoderFactory* fallback_factory, const SdpVideoFormat& format); @@ -120,11 +120,11 @@ class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder { void set_is_keyframe_needed() { is_keyframe_needed_ = true; } bool is_paused() const { return is_paused_; } void set_is_paused(bool is_paused) { is_paused_ = is_paused; } - absl::optional target_fps() const { + absl::optional target_fps() const { return framerate_controller_ == nullptr ? absl::nullopt - : absl::optional( - framerate_controller_->GetTargetRate()); + : absl::optional( - framerate_controller_->GetMaxFramerate()); } std::unique_ptr ReleaseEncoderContext() &&; @@ -147,7 +147,7 @@ class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder { void DestroyStoredEncoders(); // This method creates encoder. May reuse previously created encoders from - // |cached_encoder_contexts_|. It's const because it's used from + // `cached_encoder_contexts_`. It's const because it's used from // const GetEncoderInfo(). std::unique_ptr FetchOrCreateEncoderContext( bool is_lowest_quality_stream) const; @@ -182,7 +182,7 @@ class RTC_EXPORT SimulcastEncoderAdapter : public VideoEncoder { // Store previously created and released encoders, so they don't have to be // recreated. Remaining encoders are destroyed by the destructor. - // Marked as |mutable| becuase we may need to temporarily create encoder in + // Marked as `mutable` because we may need to temporarily create encoder in // GetEncoderInfo(), which is const.
mutable std::list> cached_encoder_contexts_; diff --git a/media/engine/simulcast_encoder_adapter_unittest.cc b/media/engine/simulcast_encoder_adapter_unittest.cc index 48e005f1c2..bb2335cb5b 100644 --- a/media/engine/simulcast_encoder_adapter_unittest.cc +++ b/media/engine/simulcast_encoder_adapter_unittest.cc @@ -171,6 +171,9 @@ class MockVideoEncoderFactory : public VideoEncoderFactory { const std::vector& encoders() const; void SetEncoderNames(const std::vector& encoder_names); + void set_create_video_encode_return_nullptr(bool return_nullptr) { + create_video_encoder_return_nullptr_ = return_nullptr; + } void set_init_encode_return_value(int32_t value); void set_requested_resolution_alignments( std::vector requested_resolution_alignments) { @@ -183,10 +186,11 @@ class MockVideoEncoderFactory : public VideoEncoderFactory { void DestroyVideoEncoder(VideoEncoder* encoder); private: + bool create_video_encoder_return_nullptr_ = false; int32_t init_encode_return_value_ = 0; std::vector encoders_; std::vector encoder_names_; - // Keep number of entries in sync with |kMaxSimulcastStreams|. + // Keep number of entries in sync with `kMaxSimulcastStreams`. std::vector requested_resolution_alignments_ = {1, 1, 1}; bool supports_simulcast_ = false; }; @@ -238,9 +242,9 @@ class MockVideoEncoder : public VideoEncoder { apply_alignment_to_all_simulcast_layers_; info.has_trusted_rate_controller = has_trusted_rate_controller_; info.is_hardware_accelerated = is_hardware_accelerated_; - info.has_internal_source = has_internal_source_; info.fps_allocation[0] = fps_allocation_; info.supports_simulcast = supports_simulcast_; + info.is_qp_trusted = is_qp_trusted_; return info; } @@ -290,10 +294,6 @@ class MockVideoEncoder : public VideoEncoder { is_hardware_accelerated_ = is_hardware_accelerated; } - void set_has_internal_source(bool has_internal_source) { - has_internal_source_ = has_internal_source; - } - void set_fps_allocation(const FramerateFractions& fps_allocation) { fps_allocation_ = fps_allocation; } @@ -308,6 +308,10 @@ class MockVideoEncoder : public VideoEncoder { video_format_ = video_format; } + void set_is_qp_trusted(absl::optional is_qp_trusted) { + is_qp_trusted_ = is_qp_trusted; + } + bool supports_simulcast() const { return supports_simulcast_; } SdpVideoFormat video_format() const { return video_format_; } @@ -321,11 +325,11 @@ class MockVideoEncoder : public VideoEncoder { bool apply_alignment_to_all_simulcast_layers_ = false; bool has_trusted_rate_controller_ = false; bool is_hardware_accelerated_ = false; - bool has_internal_source_ = false; int32_t init_encode_return_value_ = 0; VideoEncoder::RateControlParameters last_set_rates_; FramerateFractions fps_allocation_; bool supports_simulcast_ = false; + absl::optional is_qp_trusted_; SdpVideoFormat video_format_; VideoCodec codec_; @@ -340,6 +344,10 @@ std::vector MockVideoEncoderFactory::GetSupportedFormats() std::unique_ptr MockVideoEncoderFactory::CreateVideoEncoder( const SdpVideoFormat& format) { + if (create_video_encoder_return_nullptr_) { + return nullptr; + } + auto encoder = std::make_unique<::testing::NiceMock>(this); encoder->set_init_encode_return_value(init_encode_return_value_); const char* encoder_name = encoder_names_.empty() @@ -387,7 +395,7 @@ class TestSimulcastEncoderAdapterFakeHelper { video_format_(video_format) {} // Can only be called once as the SimulcastEncoderAdapter will take the - // ownership of |factory_|. + // ownership of `factory_`. 
VideoEncoder* CreateMockEncoderAdapter() { return new SimulcastEncoderAdapter(primary_factory_.get(), fallback_factory_.get(), video_format_); } @@ -433,8 +441,8 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test, void ReSetUp() { if (adapter_) { adapter_->Release(); - // |helper_| owns factories which |adapter_| needs to destroy encoders. - // Release |adapter_| before |helper_| (released in SetUp()). + // `helper_` owns the factories which `adapter_` needs in order to destroy + // its encoders. Release `adapter_` before `helper_` (released in SetUp()). adapter_.reset(); } SetUp(); @@ -755,7 +763,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, DoesNotLeakEncoders) { EXPECT_EQ(3u, helper_->factory()->encoders().size()); // The adapter should destroy all encoders it has allocated. Since - // |helper_->factory()| is owned by |adapter_|, however, we need to rely on + // `helper_->factory()` is owned by `adapter_`, however, we need to rely on // lsan to find leaks here. EXPECT_EQ(0, adapter_->Release()); adapter_.reset(); @@ -978,7 +986,7 @@ class FakeNativeBufferI420 : public VideoFrameBuffer { if (allow_to_i420_) { return I420Buffer::Create(width_, height_); } else { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } return nullptr; } @@ -1365,7 +1373,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, VideoEncoder::ResolutionBitrateLimits{789, 33000, 66000, 99000})); } -TEST_F(TestSimulcastEncoderAdapterFake, ReportsInternalSource) { +TEST_F(TestSimulcastEncoderAdapterFake, ReportsIsQpTrusted) { SimulcastTestFixtureImpl::DefaultSettings( &codec_, static_cast(kTestTemporalLayerProfile), kVideoCodecVP8); @@ -1376,15 +1384,15 @@ // All encoders have internal source, simulcast adapter reports true. for (MockVideoEncoder* encoder : helper_->factory()->encoders()) { - encoder->set_has_internal_source(true); + encoder->set_is_qp_trusted(true); } EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings)); - EXPECT_TRUE(adapter_->GetEncoderInfo().has_internal_source); + EXPECT_TRUE(adapter_->GetEncoderInfo().is_qp_trusted.value_or(false)); - // One encoder does not have internal source, simulcast adapter reports false. - helper_->factory()->encoders()[2]->set_has_internal_source(false); + // One encoder reports QP not trusted, simulcast adapter reports false. + helper_->factory()->encoders()[2]->set_is_qp_trusted(false); EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings)); - EXPECT_FALSE(adapter_->GetEncoderInfo().has_internal_source); + EXPECT_FALSE(adapter_->GetEncoderInfo().is_qp_trusted.value_or(true)); } TEST_F(TestSimulcastEncoderAdapterFake, ReportsFpsAllocation) { @@ -1683,5 +1691,46 @@ TEST_F(TestSimulcastEncoderAdapterFake, EXPECT_NE(helper_->factory()->encoders()[0], prev_encoder); } +TEST_F(TestSimulcastEncoderAdapterFake, + UseFallbackEncoderIfCreatePrimaryEncoderFailed) { + // Enable support for fallback encoder factory and re-setup. + use_fallback_factory_ = true; + SetUp(); + SimulcastTestFixtureImpl::DefaultSettings( + &codec_, static_cast(kTestTemporalLayerProfile), + kVideoCodecVP8); + codec_.numberOfSimulcastStreams = 1; + helper_->factory()->SetEncoderNames({"primary"}); + helper_->fallback_factory()->SetEncoderNames({"fallback"}); + + // Emulate a failure when creating the primary encoder and verify that SEA + // switches to the fallback encoder.
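// For reference, a hedged sketch (invented minimal types, not the adapter's
// real API) of the creation order this test exercises: primary first, then
// fallback, with nullptr from both surfacing as an InitEncode() error. In the
// real adapter, when both factories succeed the primary is wrapped together
// with the fallback in a software-fallback wrapper.
#include <memory>
#include <string>

struct SketchEncoder {
  std::string name;
};

std::unique_ptr<SketchEncoder> CreateFrom(bool factory_fails,
                                          const std::string& name) {
  if (factory_fails)
    return nullptr;
  return std::make_unique<SketchEncoder>(SketchEncoder{name});
}

std::unique_ptr<SketchEncoder> SelectEncoder(bool primary_fails,
                                             bool fallback_fails) {
  if (auto primary = CreateFrom(primary_fails, "primary"))
    return primary;
  return CreateFrom(fallback_fails, "fallback");  // May also be nullptr.
}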
+ helper_->factory()->set_create_video_encode_return_nullptr(true); + EXPECT_EQ(0, adapter_->InitEncode(&codec_, kSettings)); + ASSERT_EQ(0u, helper_->factory()->encoders().size()); + ASSERT_EQ(1u, helper_->fallback_factory()->encoders().size()); + EXPECT_EQ("fallback", adapter_->GetEncoderInfo().implementation_name); +} + +TEST_F(TestSimulcastEncoderAdapterFake, + InitEncodeReturnsErrorIfEncoderCannotBeCreated) { + // Enable support for fallback encoder factory and re-setup. + use_fallback_factory_ = true; + SetUp(); + SimulcastTestFixtureImpl::DefaultSettings( + &codec_, static_cast(kTestTemporalLayerProfile), + kVideoCodecVP8); + codec_.numberOfSimulcastStreams = 1; + helper_->factory()->SetEncoderNames({"primary"}); + helper_->fallback_factory()->SetEncoderNames({"fallback"}); + + // Emulate failures when creating the primary and fallback encoders and + // verify that `InitEncode` returns an error. + helper_->factory()->set_create_video_encode_return_nullptr(true); + helper_->fallback_factory()->set_create_video_encode_return_nullptr(true); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_MEMORY, + adapter_->InitEncode(&codec_, kSettings)); +} + } // namespace test } // namespace webrtc diff --git a/media/engine/unhandled_packets_buffer.cc b/media/engine/unhandled_packets_buffer.cc index ebc841e1fc..cb6f0ec335 100644 --- a/media/engine/unhandled_packets_buffer.cc +++ b/media/engine/unhandled_packets_buffer.cc @@ -35,7 +35,7 @@ void UnhandledPacketsBuffer::AddPacket(uint32_t ssrc, insert_pos_ = (insert_pos_ + 1) % kMaxStashedPackets; } -// Backfill |consumer| with all stored packet related |ssrcs|. +// Backfill `consumer` with all stored packets related to `ssrcs`. void UnhandledPacketsBuffer::BackfillPackets( rtc::ArrayView ssrcs, std::function consumer) { diff --git a/media/engine/unhandled_packets_buffer.h b/media/engine/unhandled_packets_buffer.h index ef03588165..63a6195c46 100644 --- a/media/engine/unhandled_packets_buffer.h +++ b/media/engine/unhandled_packets_buffer.h @@ -35,7 +35,7 @@ class UnhandledPacketsBuffer { int64_t packet_time_us, rtc::CopyOnWriteBuffer packet); - // Feed all packets with |ssrcs| into |consumer|. + // Feed all packets with `ssrcs` into `consumer`. void BackfillPackets( rtc::ArrayView ssrcs, std::function consumer); diff --git a/media/engine/webrtc_media_engine.cc b/media/engine/webrtc_media_engine.cc index 7ac666ec9e..f083b9c9ca 100644 --- a/media/engine/webrtc_media_engine.cc +++ b/media/engine/webrtc_media_engine.cc @@ -10,6 +10,7 @@ #include "media/engine/webrtc_media_engine.h" +#include #include #include @@ -27,7 +28,7 @@ namespace cricket { std::unique_ptr CreateMediaEngine( MediaEngineDependencies dependencies) { - // TODO(sprang): Make populating |dependencies.trials| mandatory and remove + // TODO(sprang): Make populating `dependencies.trials` mandatory and remove // these fallbacks. std::unique_ptr fallback_trials( dependencies.trials ? nullptr : new webrtc::FieldTrialBasedConfig()); @@ -74,7 +75,8 @@ void DiscardRedundantExtensions( } // namespace bool ValidateRtpExtensions( - const std::vector& extensions) { + rtc::ArrayView extensions, + rtc::ArrayView old_extensions) { bool id_used[1 + webrtc::RtpExtension::kMaxId] = {false}; for (const auto& extension : extensions) { if (extension.id < webrtc::RtpExtension::kMinId || @@ -89,6 +91,45 @@ bool ValidateRtpExtensions( } id_used[extension.id] = true; } + // Validate the extension list against the already negotiated extensions.
+ // Re-registering is OK, re-mapping (either same URL at new ID or same + // ID used with new URL) is an illegal remap. + + // This is required in order to avoid a crash when registering an + // extension. A better structure would use the registered extensions + // in the RTPSender. This requires spinning through: + // + // WebRtcVoiceMediaChannel::::WebRtcAudioSendStream::stream_ (pointer) + // AudioSendStream::rtp_rtcp_module_ (pointer) + // ModuleRtpRtcpImpl2::rtp_sender_ (pointer) + // RtpSenderContext::packet_generator (struct member) + // RTPSender::rtp_header_extension_map_ (class member) + // + // Getting at this seems like a hard slog. + if (!old_extensions.empty()) { + absl::string_view urimap[1 + webrtc::RtpExtension::kMaxId]; + std::map idmap; + for (const auto& old_extension : old_extensions) { + urimap[old_extension.id] = old_extension.uri; + idmap[old_extension.uri] = old_extension.id; + } + for (const auto& extension : extensions) { + if (!urimap[extension.id].empty() && + urimap[extension.id] != extension.uri) { + RTC_LOG(LS_ERROR) << "Extension negotiation failure: " << extension.id + << " was mapped to " << urimap[extension.id] + << " but is proposed to be changed to " << extension.uri; + return false; + } + const auto& it = idmap.find(extension.uri); + if (it != idmap.end() && it->second != extension.id) { + RTC_LOG(LS_ERROR) << "Extension negotiation failure: " << extension.uri + << " was identified by " << it->second + << " but is proposed to be changed to " << extension.id; + return false; + } + } + } return true; } @@ -97,7 +138,8 @@ std::vector FilterRtpExtensions( bool (*supported)(absl::string_view), bool filter_redundant_extensions, const webrtc::WebRtcKeyValueConfig& trials) { - RTC_DCHECK(ValidateRtpExtensions(extensions)); + // Don't check against old parameters; this should have been done earlier. + RTC_DCHECK(ValidateRtpExtensions(extensions, {})); RTC_DCHECK(supported); std::vector result; diff --git a/media/engine/webrtc_media_engine.h b/media/engine/webrtc_media_engine.h index 34ec4cdc9c..ff977609b2 100644 --- a/media/engine/webrtc_media_engine.h +++ b/media/engine/webrtc_media_engine.h @@ -63,8 +63,11 @@ RTC_EXPORT std::unique_ptr CreateMediaEngine( MediaEngineDependencies dependencies); // Verify that extension IDs are within 1-byte extension range and are not -// overlapping. -bool ValidateRtpExtensions(const std::vector& extensions); +// overlapping, and that they form a legal change from previously registered +// extensions (if any). +bool ValidateRtpExtensions( + rtc::ArrayView extensions, + rtc::ArrayView old_extensions); // Discard any extensions not validated by the 'supported' predicate. diff --git a/media/engine/webrtc_media_engine_unittest.cc b/media/engine/webrtc_media_engine_unittest.cc index 78d13fc297..81982fae2b 100644 --- a/media/engine/webrtc_media_engine_unittest.cc +++ b/media/engine/webrtc_media_engine_unittest.cc @@ -66,41 +66,68 @@ bool IsSorted(const std::vector& extensions) { } } // namespace -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_EmptyList) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsEmptyList) { std::vector extensions; - EXPECT_TRUE(ValidateRtpExtensions(extensions)); + EXPECT_TRUE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_AllGood) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsAllGood) { std::vector extensions = MakeUniqueExtensions(); - EXPECT_TRUE(ValidateRtpExtensions(extensions)); + EXPECT_TRUE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OutOfRangeId_Low) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOutOfRangeIdLow) { std::vector extensions = MakeUniqueExtensions(); extensions.push_back(RtpExtension("foo", 0)); - EXPECT_FALSE(ValidateRtpExtensions(extensions)); + EXPECT_FALSE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OutOfRangeId_High) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOutOfRangeIdHigh) { std::vector extensions = MakeUniqueExtensions(); extensions.push_back(RtpExtension("foo", 256)); - EXPECT_FALSE(ValidateRtpExtensions(extensions)); + EXPECT_FALSE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OverlappingIds_StartOfSet) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOverlappingIdsStartOfSet) { std::vector extensions = MakeUniqueExtensions(); extensions.push_back(RtpExtension("foo", 1)); - EXPECT_FALSE(ValidateRtpExtensions(extensions)); + EXPECT_FALSE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OverlappingIds_EndOfSet) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOverlappingIdsEndOfSet) { std::vector extensions = MakeUniqueExtensions(); extensions.push_back(RtpExtension("foo", 255)); - EXPECT_FALSE(ValidateRtpExtensions(extensions)); + EXPECT_FALSE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_EmptyList) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsEmptyToEmpty) { + std::vector extensions; + EXPECT_TRUE(ValidateRtpExtensions(extensions, extensions)); +} + +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsNoChange) { + std::vector extensions = MakeUniqueExtensions(); + EXPECT_TRUE(ValidateRtpExtensions(extensions, extensions)); +} + +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsChangeIdNotUrl) { + std::vector old_extensions = MakeUniqueExtensions(); + std::vector new_extensions = old_extensions; + std::swap(new_extensions[0].id, new_extensions[1].id); + + EXPECT_FALSE(ValidateRtpExtensions(new_extensions, old_extensions)); +} + +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsChangeIdForUrl) { + std::vector old_extensions = MakeUniqueExtensions(); + std::vector new_extensions = old_extensions; + // Change first extension to something not generated by MakeUniqueExtensions. + new_extensions[0].id = 123; + + EXPECT_FALSE(ValidateRtpExtensions(new_extensions, old_extensions)); +} + +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsEmptyList) { std::vector extensions; webrtc::FieldTrialBasedConfig trials;
Duplicate // extensions are removed if 'filter_redundant_extensions' is set, and also any diff --git a/media/engine/webrtc_media_engine_unittest.cc b/media/engine/webrtc_media_engine_unittest.cc index 78d13fc297..81982fae2b 100644 --- a/media/engine/webrtc_media_engine_unittest.cc +++ b/media/engine/webrtc_media_engine_unittest.cc @@ -66,41 +66,68 @@ bool IsSorted(const std::vector& extensions) { } } // namespace -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_EmptyList) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsEmptyList) { std::vector extensions; - EXPECT_TRUE(ValidateRtpExtensions(extensions)); + EXPECT_TRUE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_AllGood) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsAllGood) { std::vector extensions = MakeUniqueExtensions(); - EXPECT_TRUE(ValidateRtpExtensions(extensions)); + EXPECT_TRUE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OutOfRangeId_Low) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOutOfRangeId_Low) { std::vector extensions = MakeUniqueExtensions(); extensions.push_back(RtpExtension("foo", 0)); - EXPECT_FALSE(ValidateRtpExtensions(extensions)); + EXPECT_FALSE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OutOfRangeId_High) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOutOfRangeIdHigh) { std::vector extensions = MakeUniqueExtensions(); extensions.push_back(RtpExtension("foo", 256)); - EXPECT_FALSE(ValidateRtpExtensions(extensions)); + EXPECT_FALSE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OverlappingIds_StartOfSet) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOverlappingIdsStartOfSet) { std::vector extensions = MakeUniqueExtensions(); extensions.push_back(RtpExtension("foo", 1)); - EXPECT_FALSE(ValidateRtpExtensions(extensions)); + EXPECT_FALSE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OverlappingIds_EndOfSet) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsOverlappingIdsEndOfSet) { std::vector extensions = MakeUniqueExtensions(); extensions.push_back(RtpExtension("foo", 255)); - EXPECT_FALSE(ValidateRtpExtensions(extensions)); + EXPECT_FALSE(ValidateRtpExtensions(extensions, {})); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_EmptyList) { +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsEmptyToEmpty) { + std::vector extensions; + EXPECT_TRUE(ValidateRtpExtensions(extensions, extensions)); +} + +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsNoChange) { + std::vector extensions = MakeUniqueExtensions(); + EXPECT_TRUE(ValidateRtpExtensions(extensions, extensions)); +} + +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsChangeIdNotUrl) { + std::vector old_extensions = MakeUniqueExtensions(); + std::vector new_extensions = old_extensions; + std::swap(new_extensions[0].id, new_extensions[1].id); + + EXPECT_FALSE(ValidateRtpExtensions(new_extensions, old_extensions)); +} + +TEST(WebRtcMediaEngineTest, ValidateRtpExtensionsChangeIdForUrl) { + std::vector old_extensions = MakeUniqueExtensions(); + std::vector new_extensions = old_extensions; + // Change first extension to something not generated by MakeUniqueExtensions + new_extensions[0].id = 123; + + EXPECT_FALSE(ValidateRtpExtensions(new_extensions, old_extensions)); +} + +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsEmptyList) { std::vector extensions; webrtc::FieldTrialBasedConfig trials; 
std::vector filtered = @@ -108,7 +135,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_EmptyList) { EXPECT_EQ(0u, filtered.size()); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_IncludeOnlySupported) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsIncludeOnlySupported) { std::vector extensions = MakeUniqueExtensions(); webrtc::FieldTrialBasedConfig trials; std::vector filtered = @@ -118,7 +145,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_IncludeOnlySupported) { EXPECT_EQ("i", filtered[1].uri); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_SortedByName_1) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsSortedByName1) { std::vector extensions = MakeUniqueExtensions(); webrtc::FieldTrialBasedConfig trials; std::vector filtered = @@ -127,7 +154,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_SortedByName_1) { EXPECT_TRUE(IsSorted(filtered)); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_SortedByName_2) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsSortedByName2) { std::vector extensions = MakeUniqueExtensions(); webrtc::FieldTrialBasedConfig trials; std::vector filtered = @@ -136,7 +163,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_SortedByName_2) { EXPECT_TRUE(IsSorted(filtered)); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_DontRemoveRedundant) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsDontRemoveRedundant) { std::vector extensions = MakeRedundantExtensions(); webrtc::FieldTrialBasedConfig trials; std::vector filtered = @@ -146,7 +173,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_DontRemoveRedundant) { EXPECT_EQ(filtered[0].uri, filtered[1].uri); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundant) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundant) { std::vector extensions = MakeRedundantExtensions(); webrtc::FieldTrialBasedConfig trials; std::vector filtered = @@ -156,7 +183,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundant) { EXPECT_NE(filtered[0].uri, filtered[1].uri); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantEncrypted_1) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantEncrypted1) { std::vector extensions; extensions.push_back(webrtc::RtpExtension("b", 1)); extensions.push_back(webrtc::RtpExtension("b", 2, true)); @@ -173,7 +200,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantEncrypted_1) { EXPECT_NE(filtered[1].uri, filtered[2].uri); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantEncrypted_2) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantEncrypted2) { std::vector extensions; extensions.push_back(webrtc::RtpExtension("b", 1, true)); extensions.push_back(webrtc::RtpExtension("b", 2)); @@ -190,7 +217,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantEncrypted_2) { EXPECT_NE(filtered[1].uri, filtered[2].uri); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantBwe_1) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantBwe1) { webrtc::test::ScopedFieldTrials override_field_trials_( "WebRTC-FilterAbsSendTimeExtension/Enabled/"); webrtc::FieldTrialBasedConfig trials; @@ -209,7 +236,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantBwe_1) { } TEST(WebRtcMediaEngineTest, - FilterRtpExtensions_RemoveRedundantBwe_1_KeepAbsSendTime) { + FilterRtpExtensionsRemoveRedundantBwe1KeepAbsSendTime) { std::vector extensions; extensions.push_back( RtpExtension(RtpExtension::kTransportSequenceNumberUri, 3)); @@ -226,7 +253,7 @@ 
TEST(WebRtcMediaEngineTest, EXPECT_EQ(RtpExtension::kAbsSendTimeUri, filtered[1].uri); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantBweEncrypted_1) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantBweEncrypted1) { webrtc::test::ScopedFieldTrials override_field_trials_( "WebRTC-FilterAbsSendTimeExtension/Enabled/"); webrtc::FieldTrialBasedConfig trials; @@ -251,7 +278,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantBweEncrypted_1) { } TEST(WebRtcMediaEngineTest, - FilterRtpExtensions_RemoveRedundantBweEncrypted_1_KeepAbsSendTime) { + FilterRtpExtensionsRemoveRedundantBweEncrypted1KeepAbsSendTime) { std::vector extensions; extensions.push_back( RtpExtension(RtpExtension::kTransportSequenceNumberUri, 3)); @@ -274,7 +301,7 @@ TEST(WebRtcMediaEngineTest, EXPECT_NE(filtered[0].encrypt, filtered[1].encrypt); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantBwe_2) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantBwe2) { std::vector extensions; extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 1)); extensions.push_back(RtpExtension(RtpExtension::kAbsSendTimeUri, 14)); @@ -286,7 +313,7 @@ TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantBwe_2) { EXPECT_EQ(RtpExtension::kAbsSendTimeUri, filtered[0].uri); } -TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantBwe_3) { +TEST(WebRtcMediaEngineTest, FilterRtpExtensionsRemoveRedundantBwe3) { std::vector extensions; extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 2)); extensions.push_back(RtpExtension(RtpExtension::kTimestampOffsetUri, 14)); diff --git a/media/engine/webrtc_video_engine.cc b/media/engine/webrtc_video_engine.cc index 38a210ee7d..58ae4995d7 100644 --- a/media/engine/webrtc_video_engine.cc +++ b/media/engine/webrtc_video_engine.cc @@ -31,6 +31,7 @@ #include "media/engine/simulcast.h" #include "media/engine/webrtc_media_engine.h" #include "media/engine/webrtc_voice_engine.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/copy_on_write_buffer.h" #include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/experiments/field_trial_units.h" @@ -46,9 +47,28 @@ namespace cricket { namespace { +using ::webrtc::ParseRtpPayloadType; +using ::webrtc::ParseRtpSsrc; + const int kMinLayerSize = 16; constexpr int64_t kUnsignaledSsrcCooldownMs = rtc::kNumMillisecsPerSec / 2; +// TODO(bugs.webrtc.org/13166): Remove AV1X when backwards compatibility is not +// needed. +constexpr char kAv1xCodecName[] = "AV1X"; +
+int ScaleDownResolution(int resolution, + double scale_down_by, + int min_resolution) { + // Resolution is never scaled down to smaller than min_resolution. + // If the input resolution is already smaller than min_resolution, + // no scaling should be done at all. + if (resolution <= min_resolution) + return resolution; + return std::max(static_cast(resolution / scale_down_by + 0.5), + min_resolution); +} + const char* StreamTypeToString( webrtc::VideoSendStream::StreamStats::StreamType type) { switch (type) { @@ -106,6 +126,30 @@ void AddDefaultFeedbackParams(VideoCodec* codec, } } +// Helper function to determine whether a codec should use the [35, 63] range. +// Should be used when adding new codecs (or variants).
+bool IsCodecValidForLowerRange(const VideoCodec& codec) { + if (absl::EqualsIgnoreCase(codec.name, kFlexfecCodecName) || + absl::EqualsIgnoreCase(codec.name, kAv1CodecName) || + absl::EqualsIgnoreCase(codec.name, kAv1xCodecName)) { + return true; + } else if (absl::EqualsIgnoreCase(codec.name, kH264CodecName)) { + std::string profileLevelId; + std::string packetizationMode; + + if (codec.GetParam(kH264FmtpProfileLevelId, &profileLevelId)) { + if (absl::StartsWithIgnoreCase(profileLevelId, "4d00")) { + if (codec.GetParam(kH264FmtpPacketizationMode, &packetizationMode)) { + return packetizationMode == "0"; + } + } + // H264 with YUV444. + return absl::StartsWithIgnoreCase(profileLevelId, "f400"); + } + } + return false; +} + // This function will assign dynamic payload types (in the range [96, 127] // and then [35, 63]) to the input codecs, and also add ULPFEC, RED, FlexFEC, // and associated RTX codecs for recognized codecs (VP8, VP9, H264, and RED). @@ -166,9 +210,6 @@ std::vector GetPayloadTypesAndDefaultCodecs( std::vector output_codecs; for (const webrtc::SdpVideoFormat& format : supported_formats) { VideoCodec codec(format); - bool isCodecValidForLowerRange = - absl::EqualsIgnoreCase(codec.name, kFlexfecCodecName) || - absl::EqualsIgnoreCase(codec.name, kAv1CodecName); bool isFecCodec = absl::EqualsIgnoreCase(codec.name, kUlpfecCodecName) || absl::EqualsIgnoreCase(codec.name, kFlexfecCodecName); @@ -184,7 +225,7 @@ std::vector GetPayloadTypesAndDefaultCodecs( // Lower range gets used for "new" codecs or when running out of payload // types in the upper range. - if (isCodecValidForLowerRange || + if (IsCodecValidForLowerRange(codec) || payload_type_upper >= kLastDynamicPayloadTypeUpperRange) { codec.id = payload_type_lower++; } else { @@ -204,7 +245,7 @@ std::vector GetPayloadTypesAndDefaultCodecs( RTC_DCHECK_EQ(payload_type_upper, kLastDynamicPayloadTypeUpperRange); break; } - if (isCodecValidForLowerRange || + if (IsCodecValidForLowerRange(codec) || payload_type_upper >= kLastDynamicPayloadTypeUpperRange) { output_codecs.push_back( VideoCodec::CreateRtxCodec(payload_type_lower++, codec.id)); @@ -291,10 +332,16 @@ static bool ValidateStreamParams(const StreamParams& sp) { // Returns true if the given codec is disallowed from doing simulcast. bool IsCodecDisabledForSimulcast(const std::string& codec_name, const webrtc::WebRtcKeyValueConfig& trials) { - return !absl::StartsWith(trials.Lookup("WebRTC-H264Simulcast"), "Disabled") - ? absl::EqualsIgnoreCase(codec_name, kVp9CodecName) - : absl::EqualsIgnoreCase(codec_name, kH264CodecName) || - absl::EqualsIgnoreCase(codec_name, kVp9CodecName); + if (absl::EqualsIgnoreCase(codec_name, kVp9CodecName) || + absl::EqualsIgnoreCase(codec_name, kAv1CodecName)) { + return true; + } + + if (absl::EqualsIgnoreCase(codec_name, kH264CodecName)) { + return absl::StartsWith(trials.Lookup("WebRTC-H264Simulcast"), "Disabled"); + } + + return false; } // The selected thresholds for QVGA and VGA corresponded to a QP around 10. 
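As a quick sanity check of the `ScaleDownResolution` helper introduced above (round to nearest, clamp at the floor, pass through inputs already at or below it), a self-contained sketch mirroring its definition:

#include <algorithm>
#include <cassert>

int ScaleDownResolution(int resolution, double scale_down_by,
                        int min_resolution) {
  if (resolution <= min_resolution)
    return resolution;
  return std::max(static_cast<int>(resolution / scale_down_by + 0.5),
                  min_resolution);
}

int main() {
  assert(ScaleDownResolution(1280, 2.0, 16) == 640);  // Plain halving.
  assert(ScaleDownResolution(33, 4.0, 16) == 16);     // Clamped to the floor.
  assert(ScaleDownResolution(15, 2.0, 16) == 15);     // Below floor: unchanged.
  return 0;
}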
@@ -317,49 +364,6 @@ static int GetMaxDefaultVideoBitrateKbps(int width, return max_bitrate; } -bool GetVp9LayersFromFieldTrialGroup( - size_t* num_spatial_layers, - size_t* num_temporal_layers, - const webrtc::WebRtcKeyValueConfig& trials) { - std::string group = trials.Lookup("WebRTC-SupportVP9SVC"); - if (group.empty()) - return false; - - if (sscanf(group.c_str(), "EnabledByFlag_%zuSL%zuTL", num_spatial_layers, - num_temporal_layers) != 2) { - return false; - } - if (*num_spatial_layers > webrtc::kMaxSpatialLayers || - *num_spatial_layers < 1) - return false; - - const size_t kMaxTemporalLayers = 3; - if (*num_temporal_layers > kMaxTemporalLayers || *num_temporal_layers < 1) - return false; - - return true; -} - -absl::optional GetVp9SpatialLayersFromFieldTrial( - const webrtc::WebRtcKeyValueConfig& trials) { - size_t num_sl; - size_t num_tl; - if (GetVp9LayersFromFieldTrialGroup(&num_sl, &num_tl, trials)) { - return num_sl; - } - return absl::nullopt; -} - -absl::optional GetVp9TemporalLayersFromFieldTrial( - const webrtc::WebRtcKeyValueConfig& trials) { - size_t num_sl; - size_t num_tl; - if (GetVp9LayersFromFieldTrialGroup(&num_sl, &num_tl, trials)) { - return num_tl; - } - return absl::nullopt; -} - // Returns its smallest positive argument. If neither argument is positive, // returns an arbitrary nonpositive value. int MinPositive(int a, int b) { @@ -444,7 +448,7 @@ MergeInfoAboutOutboundRtpSubstreams( webrtc::VideoSendStream::StreamStats& rtp_substream = rtp_substreams[media_ssrc]; - // We only merge |rtp_stats|. All other metrics are not applicable for RTX + // We only merge `rtp_stats`. All other metrics are not applicable for RTX // and FlexFEC. // TODO(hbos): kRtx and kFlexfec stats should use a separate struct to make // it clear what is or is not applicable. @@ -482,7 +486,6 @@ WebRtcVideoChannel::WebRtcVideoSendStream::ConfigureVideoEncoderSettings( (parameters_.config.rtp.ssrcs.size() == 1 || NumActiveStreams(rtp_parameters_) == 1); - bool frame_dropping = !is_screencast; bool denoising; bool codec_default_denoising = false; if (is_screencast) { @@ -496,7 +499,6 @@ WebRtcVideoChannel::WebRtcVideoSendStream::ConfigureVideoEncoderSettings( if (absl::EqualsIgnoreCase(codec.name, kH264CodecName)) { webrtc::VideoCodecH264 h264_settings = webrtc::VideoEncoder::GetDefaultH264Settings(); - h264_settings.frameDroppingOn = frame_dropping; return rtc::make_ref_counted< webrtc::VideoEncoderConfig::H264EncoderSpecificSettings>(h264_settings); } @@ -506,29 +508,20 @@ WebRtcVideoChannel::WebRtcVideoSendStream::ConfigureVideoEncoderSettings( vp8_settings.automaticResizeOn = automatic_resize; // VP8 denoising is enabled by default. vp8_settings.denoisingOn = codec_default_denoising ? true : denoising; - vp8_settings.frameDroppingOn = frame_dropping; return rtc::make_ref_counted< webrtc::VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings); } if (absl::EqualsIgnoreCase(codec.name, kVp9CodecName)) { webrtc::VideoCodecVP9 vp9_settings = webrtc::VideoEncoder::GetDefaultVp9Settings(); - const size_t default_num_spatial_layers = - parameters_.config.rtp.ssrcs.size(); - const size_t num_spatial_layers = - GetVp9SpatialLayersFromFieldTrial(call_->trials()) - .value_or(default_num_spatial_layers); - - const size_t default_num_temporal_layers = - num_spatial_layers > 1 ? 
kConferenceDefaultNumTemporalLayers : 1; - const size_t num_temporal_layers = - GetVp9TemporalLayersFromFieldTrial(call_->trials()) - .value_or(default_num_temporal_layers); vp9_settings.numberOfSpatialLayers = std::min( - num_spatial_layers, kConferenceMaxNumSpatialLayers); - vp9_settings.numberOfTemporalLayers = std::min( - num_temporal_layers, kConferenceMaxNumTemporalLayers); + parameters_.config.rtp.ssrcs.size(), kConferenceMaxNumSpatialLayers); + vp9_settings.numberOfTemporalLayers = + std::min(parameters_.config.rtp.ssrcs.size() > 1 + ? kConferenceDefaultNumTemporalLayers + : 1, + kConferenceMaxNumTemporalLayers); // VP9 denoising is disabled by default. vp9_settings.denoisingOn = codec_default_denoising ? true : denoising; @@ -536,15 +529,16 @@ WebRtcVideoChannel::WebRtcVideoSendStream::ConfigureVideoEncoderSettings( // Ensure frame dropping is always enabled. RTC_DCHECK(vp9_settings.frameDroppingOn); if (!is_screencast) { - webrtc::FieldTrialFlag interlayer_pred_experiment_enabled = - webrtc::FieldTrialFlag("Enabled"); + webrtc::FieldTrialFlag interlayer_pred_experiment_enabled("Enabled"); webrtc::FieldTrialEnum inter_layer_pred_mode( "inter_layer_pred_mode", webrtc::InterLayerPredMode::kOnKeyPic, {{"off", webrtc::InterLayerPredMode::kOff}, {"on", webrtc::InterLayerPredMode::kOn}, {"onkeypic", webrtc::InterLayerPredMode::kOnKeyPic}}); + webrtc::FieldTrialFlag force_flexible_mode("FlexibleMode"); webrtc::ParseFieldTrial( - {&interlayer_pred_experiment_enabled, &inter_layer_pred_mode}, + {&interlayer_pred_experiment_enabled, &inter_layer_pred_mode, + &force_flexible_mode}, call_->trials().Lookup("WebRTC-Vp9InterLayerPred")); if (interlayer_pred_experiment_enabled) { vp9_settings.interLayerPred = inter_layer_pred_mode; @@ -552,6 +546,7 @@ WebRtcVideoChannel::WebRtcVideoSendStream::ConfigureVideoEncoderSettings( // Limit inter-layer prediction to key pictures by default. vp9_settings.interLayerPred = webrtc::InterLayerPredMode::kOnKeyPic; } + vp9_settings.flexibleMode = force_flexible_mode.Get(); } else { // Multiple spatial layers vp9 screenshare needs flexible mode. 
vp9_settings.flexibleMode = vp9_settings.numberOfSpatialLayers > 1; @@ -698,7 +693,7 @@ WebRtcVideoChannel::WebRtcVideoChannel( webrtc::VideoEncoderFactory* encoder_factory, webrtc::VideoDecoderFactory* decoder_factory, webrtc::VideoBitrateAllocatorFactory* bitrate_allocator_factory) - : VideoMediaChannel(config, call->network_thread()), + : VideoMediaChannel(call->network_thread(), config.enable_dscp), worker_thread_(call->worker_thread()), call_(call), unsignalled_ssrc_handler_(&default_unsignalled_ssrc_handler_), @@ -803,7 +798,7 @@ bool WebRtcVideoChannel::GetChangedSendParameters( const VideoSendParameters& params, ChangedSendParameters* changed_params) const { if (!ValidateCodecFormats(params.codecs) || - !ValidateRtpExtensions(params.extensions)) { + !ValidateRtpExtensions(params.extensions, send_rtp_extensions_)) { return false; } @@ -838,7 +833,7 @@ bool WebRtcVideoChannel::GetChangedSendParameters( std::vector filtered_extensions = FilterRtpExtensions( params.extensions, webrtc::RtpExtension::IsSupportedForVideo, true, call_->trials()); - if (!send_rtp_extensions_ || (*send_rtp_extensions_ != filtered_extensions)) { + if (send_rtp_extensions_ != filtered_extensions) { changed_params->rtp_header_extensions = absl::optional>(filtered_extensions); } @@ -891,6 +886,12 @@ bool WebRtcVideoChannel::SetSendParameters(const VideoSendParameters& params) { } void WebRtcVideoChannel::RequestEncoderFallback() { + if (!worker_thread_->IsCurrent()) { + worker_thread_->PostTask( + ToQueuedTask(task_safety_, [this] { RequestEncoderFallback(); })); + return; + } + RTC_DCHECK_RUN_ON(&thread_checker_); if (negotiated_codecs_.size() <= 1) { RTC_LOG(LS_WARNING) << "Encoder failed but no fallback codec is available"; @@ -905,47 +906,16 @@ void WebRtcVideoChannel::RequestEncoderFallback() { } void WebRtcVideoChannel::RequestEncoderSwitch( - const EncoderSwitchRequestCallback::Config& conf) { - RTC_DCHECK_RUN_ON(&thread_checker_); - - if (!allow_codec_switching_) { - RTC_LOG(LS_INFO) << "Encoder switch requested but codec switching has" - " not been enabled yet."; - requested_encoder_switch_ = conf; + const webrtc::SdpVideoFormat& format, + bool allow_default_fallback) { + if (!worker_thread_->IsCurrent()) { + worker_thread_->PostTask( + ToQueuedTask(task_safety_, [this, format, allow_default_fallback] { + RequestEncoderSwitch(format, allow_default_fallback); + })); return; } - for (const VideoCodecSettings& codec_setting : negotiated_codecs_) { - if (codec_setting.codec.name == conf.codec_name) { - if (conf.param) { - auto it = codec_setting.codec.params.find(*conf.param); - if (it == codec_setting.codec.params.end()) - continue; - - if (conf.value && it->second != *conf.value) - continue; - } - - if (send_codec_ == codec_setting) { - // Already using this codec, no switch required. - return; - } - - ChangedSendParameters params; - params.send_codec = codec_setting; - ApplyChangedParams(params); - return; - } - } - - RTC_LOG(LS_WARNING) << "Requested encoder with codec_name:" << conf.codec_name - << ", param:" << conf.param.value_or("none") - << " and value:" << conf.value.value_or("none") - << "not found. 
No switch performed."; -} - -void WebRtcVideoChannel::RequestEncoderSwitch( - const webrtc::SdpVideoFormat& format) { RTC_DCHECK_RUN_ON(&thread_checker_); for (const VideoCodecSettings& codec_setting : negotiated_codecs_) { @@ -968,8 +938,13 @@ void WebRtcVideoChannel::RequestEncoderSwitch( } } - RTC_LOG(LS_WARNING) << "Encoder switch failed: SdpVideoFormat " - << format.ToString() << " not negotiated."; + RTC_LOG(LS_WARNING) << "Failed to switch encoder to: " << format.ToString() + << ". Is default fallback allowed: " + << allow_default_fallback; + + if (allow_default_fallback) { + RequestEncoderFallback(); + } } bool WebRtcVideoChannel::ApplyChangedParams( @@ -985,7 +960,7 @@ bool WebRtcVideoChannel::ApplyChangedParams( SetExtmapAllowMixed(*changed_params.extmap_allow_mixed); } if (changed_params.rtp_header_extensions) { - send_rtp_extensions_ = changed_params.rtp_header_extensions; + send_rtp_extensions_ = *changed_params.rtp_header_extensions; } if (changed_params.send_codec || changed_params.max_bandwidth_bps) { @@ -1063,8 +1038,16 @@ webrtc::RtpParameters WebRtcVideoChannel::GetRtpSendParameters( // Need to add the common list of codecs to the send stream-specific // RTP parameters. for (const VideoCodec& codec : send_params_.codecs) { - rtp_params.codecs.push_back(codec.ToCodecParameters()); + if (send_codec_ && send_codec_->codec.id == codec.id) { + // Put the current send codec to the front of the codecs list. + RTC_DCHECK_EQ(codec.name, send_codec_->codec.name); + rtp_params.codecs.insert(rtp_params.codecs.begin(), + codec.ToCodecParameters()); + } else { + rtp_params.codecs.push_back(codec.ToCodecParameters()); + } } + return rtp_params; } @@ -1162,7 +1145,7 @@ bool WebRtcVideoChannel::GetChangedRecvParameters( const VideoRecvParameters& params, ChangedRecvParameters* changed_params) const { if (!ValidateCodecFormats(params.codecs) || - !ValidateRtpExtensions(params.extensions)) { + !ValidateRtpExtensions(params.extensions, recv_rtp_extensions_)) { return false; } @@ -1216,7 +1199,7 @@ bool WebRtcVideoChannel::GetChangedRecvParameters( bool WebRtcVideoChannel::SetRecvParameters(const VideoRecvParameters& params) { RTC_DCHECK_RUN_ON(&thread_checker_); TRACE_EVENT0("webrtc", "WebRtcVideoChannel::SetRecvParameters"); - RTC_DLOG(LS_INFO) << "SetRecvParameters: " << params.ToString(); + RTC_LOG(LS_INFO) << "SetRecvParameters: " << params.ToString(); ChangedRecvParameters changed_params; if (!GetChangedRecvParameters(params, &changed_params)) { return false; @@ -1448,18 +1431,18 @@ bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp, if (!ValidateStreamParams(sp)) return false; - uint32_t ssrc = sp.first_ssrc(); - - // Remove running stream if this was a default stream. - const auto& prev_stream = receive_streams_.find(ssrc); - if (prev_stream != receive_streams_.end()) { - if (default_stream || !prev_stream->second->IsDefaultStream()) { - RTC_LOG(LS_ERROR) << "Receive stream for SSRC '" << ssrc - << "' already exists."; - return false; + for (uint32_t ssrc : sp.ssrcs) { + // Remove running stream if this was a default stream. 
+ const auto& prev_stream = receive_streams_.find(ssrc); + if (prev_stream != receive_streams_.end()) { + if (default_stream || !prev_stream->second->IsDefaultStream()) { + RTC_LOG(LS_ERROR) << "Receive stream for SSRC '" << ssrc + << "' already exists."; + return false; + } + DeleteReceiveStream(prev_stream->second); + receive_streams_.erase(prev_stream); } - DeleteReceiveStream(prev_stream->second); - receive_streams_.erase(prev_stream); } if (!ValidateReceiveSsrcAvailability(sp)) @@ -1482,7 +1465,7 @@ bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp, if (unsignaled_frame_transformer_ && !config.frame_transformer) config.frame_transformer = unsignaled_frame_transformer_; - receive_streams_[ssrc] = new WebRtcVideoReceiveStream( + receive_streams_[sp.first_ssrc()] = new WebRtcVideoReceiveStream( this, call_, sp, std::move(config), default_stream, recv_codecs_, flexfec_config); @@ -1539,7 +1522,7 @@ void WebRtcVideoChannel::ConfigureReceiverRtp( flexfec_config->protected_media_ssrcs = {ssrc}; flexfec_config->rtp.local_ssrc = config->rtp.local_ssrc; flexfec_config->rtcp_mode = config->rtp.rtcp_mode; - // TODO(brandtr): We should be spec-compliant and set |transport_cc| here + // TODO(brandtr): We should be spec-compliant and set `transport_cc` here // based on the rtcp-fb for the FlexFEC codec, not the media codec. flexfec_config->rtp.transport_cc = config->rtp.transport_cc; flexfec_config->rtp.extensions = config->rtp.extensions; @@ -1569,7 +1552,7 @@ void WebRtcVideoChannel::ResetUnsignaledRecvStream() { last_unsignalled_ssrc_creation_time_ms_ = absl::nullopt; // Delete any created default streams. This is needed to avoid SSRC collisions - // in Call's RtpDemuxer, in the case that |this| has created a default video + // in Call's RtpDemuxer, in the case that `this` has created a default video // receiver, and then some other WebRtcVideoChannel gets the SSRC signaled // in the corresponding Unified Plan "m=" section. auto it = receive_streams_.begin(); @@ -1589,11 +1572,8 @@ void WebRtcVideoChannel::OnDemuxerCriteriaUpdatePending() { } void WebRtcVideoChannel::OnDemuxerCriteriaUpdateComplete() { - RTC_DCHECK_RUN_ON(&network_thread_checker_); - worker_thread_->PostTask(ToQueuedTask(task_safety_, [this] { - RTC_DCHECK_RUN_ON(&thread_checker_); - ++demuxer_criteria_completed_id_; - })); + RTC_DCHECK_RUN_ON(&thread_checker_); + ++demuxer_criteria_completed_id_; } bool WebRtcVideoChannel::SetSink( @@ -1727,10 +1707,7 @@ void WebRtcVideoChannel::OnPacketReceived(rtc::CopyOnWriteBuffer packet, break; } - uint32_t ssrc = 0; - if (!GetRtpSsrc(packet.cdata(), packet.size(), &ssrc)) { - return; - } + uint32_t ssrc = ParseRtpSsrc(packet); if (unknown_ssrc_packet_buffer_) { unknown_ssrc_packet_buffer_->AddPacket(ssrc, packet_time_us, packet); @@ -1741,10 +1718,7 @@ void WebRtcVideoChannel::OnPacketReceived(rtc::CopyOnWriteBuffer packet, return; } - int payload_type = 0; - if (!GetRtpPayloadType(packet.cdata(), packet.size(), &payload_type)) { - return; - } + int payload_type = ParseRtpPayloadType(packet); // See if this payload_type is registered as one that usually gets its // own SSRC (RTX) or at least is safe to drop either way (FEC). 
If it @@ -1869,11 +1843,12 @@ void WebRtcVideoChannel::OnReadyToSend(bool ready) { } void WebRtcVideoChannel::OnNetworkRouteChanged( - const std::string& transport_name, + absl::string_view transport_name, const rtc::NetworkRoute& network_route) { RTC_DCHECK_RUN_ON(&network_thread_checker_); worker_thread_->PostTask(ToQueuedTask( - task_safety_, [this, name = transport_name, route = network_route] { + task_safety_, + [this, name = std::string(transport_name), route = network_route] { RTC_DCHECK_RUN_ON(&thread_checker_); webrtc::RtpTransportControllerSendInterface* transport = call_->GetTransportControllerSend(); @@ -1950,11 +1925,6 @@ void WebRtcVideoChannel::SetVideoCodecSwitchingEnabled(bool enabled) { allow_codec_switching_ = enabled; if (allow_codec_switching_) { RTC_LOG(LS_INFO) << "Encoder switching enabled."; - if (requested_encoder_switch_) { - RTC_LOG(LS_INFO) << "Executing cached video encoder switch request."; - RequestEncoderSwitch(*requested_encoder_switch_); - requested_encoder_switch_.reset(); - } } } @@ -2181,7 +2151,7 @@ webrtc::DegradationPreference WebRtcVideoChannel::WebRtcVideoSendStream::GetDegradationPreference() const { // Do not adapt resolution for screen content as this will likely // result in blurry and unreadable text. - // |this| acts like a VideoSource to make sure SinkWants are handled on the + // `this` acts like a VideoSource to make sure SinkWants are handled on the // correct thread. if (!enable_cpu_overuse_detection_) { return webrtc::DegradationPreference::DISABLED; @@ -2265,7 +2235,7 @@ void WebRtcVideoChannel::WebRtcVideoSendStream::SetCodec( void WebRtcVideoChannel::WebRtcVideoSendStream::SetSendParameters( const ChangedSendParameters& params) { RTC_DCHECK_RUN_ON(&thread_checker_); - // |recreate_stream| means construction-time parameters have changed and the + // `recreate_stream` means construction-time parameters have changed and the // sending stream needs to be reset with the new config. bool recreate_stream = false; if (params.rtcp_mode) { @@ -2342,19 +2312,15 @@ webrtc::RTCError WebRtcVideoChannel::WebRtcVideoSendStream::SetRtpParameters( new_degradation_preference = true; } - // TODO(bugs.webrtc.org/8807): The bitrate priority really doesn't require an - // entire encoder reconfiguration, it just needs to update the bitrate - // allocator. + // Some fields (e.g. bitrate priority) only need to update the bitrate + // allocator which is updated via ReconfigureEncoder (however, note that the + // actual encoder should only be reconfigured if needed). bool reconfigure_encoder = new_param || (new_parameters.encodings[0].bitrate_priority != rtp_parameters_.encodings[0].bitrate_priority) || new_parameters.encodings[0].scalability_mode != rtp_parameters_.encodings[0].scalability_mode; - // TODO(bugs.webrtc.org/8807): The active field as well should not require - // a full encoder reconfiguration, but it needs to update both the bitrate - // allocator and the video bitrate allocator. - // // Note that the simulcast encoder adapter relies on the fact that layers // de/activation triggers encoder reinitialization. bool new_send_state = false; @@ -2554,7 +2520,7 @@ WebRtcVideoChannel::WebRtcVideoSendStream::CreateVideoEncoderConfig( void WebRtcVideoChannel::WebRtcVideoSendStream::ReconfigureEncoder() { RTC_DCHECK_RUN_ON(&thread_checker_); if (!stream_) { - // The webrtc::VideoSendStream |stream_| has not yet been created but other + // The webrtc::VideoSendStream `stream_` has not yet been created but other // parameters has changed. 
return; } @@ -2634,8 +2600,8 @@ WebRtcVideoChannel::WebRtcVideoSendStream::GetPerLayerVideoSenderInfos( common_info.aggregated_framerate_sent = stats.encode_frame_rate; common_info.aggregated_huge_frames_sent = stats.huge_frames_sent; - // If we don't have any substreams, get the remaining metrics from |stats|. - // Otherwise, these values are obtained from |sub_stream| below. + // If we don't have any substreams, get the remaining metrics from `stats`. + // Otherwise, these values are obtained from `sub_stream` below. if (stats.substreams.empty()) { for (uint32_t ssrc : parameters_.config.rtp.ssrcs) { common_info.add_ssrc(ssrc); @@ -3000,7 +2966,7 @@ void WebRtcVideoChannel::WebRtcVideoReceiveStream::SetFeedbackParameters( config_.rtp.nack.rtp_history_ms = nack_history_ms; config_.rtp.transport_cc = transport_cc_enabled; config_.rtp.rtcp_mode = rtcp_mode; - // TODO(brandtr): We should be spec-compliant and set |transport_cc| here + // TODO(brandtr): We should be spec-compliant and set `transport_cc` here // based on the rtcp-fb for the FlexFEC codec, not the media codec. flexfec_config_.rtp.transport_cc = config_.rtp.transport_cc; flexfec_config_.rtcp_mode = config_.rtp.rtcp_mode; @@ -3020,13 +2986,20 @@ void WebRtcVideoChannel::WebRtcVideoReceiveStream::SetRecvParameters( if (params.rtp_header_extensions) { if (config_.rtp.extensions != *params.rtp_header_extensions) { config_.rtp.extensions = *params.rtp_header_extensions; - video_needs_recreation = true; + if (stream_) { + stream_->SetRtpExtensions(config_.rtp.extensions); + } else { + video_needs_recreation = true; + } } if (flexfec_config_.rtp.extensions != *params.rtp_header_extensions) { flexfec_config_.rtp.extensions = *params.rtp_header_extensions; - if (flexfec_stream_ || flexfec_config_.IsCompleteAndEnabled()) + if (flexfec_stream_) { + flexfec_stream_->SetRtpExtensions(flexfec_config_.rtp.extensions); + } else if (flexfec_config_.IsCompleteAndEnabled()) { video_needs_recreation = true; + } } } if (params.flexfec_payload_type) { @@ -3163,7 +3136,7 @@ WebRtcVideoChannel::WebRtcVideoReceiveStream::GetVideoReceiverInfo( stats.rtp_stats.packet_counter.padding_bytes; info.packets_rcvd = stats.rtp_stats.packet_counter.packets; info.packets_lost = stats.rtp_stats.packets_lost; - info.jitter_ms = stats.rtp_stats.jitter; + info.jitter_ms = stats.rtp_stats.jitter / (kVideoCodecClockrate / 1000); info.framerate_rcvd = stats.network_frame_rate; info.framerate_decoded = stats.decode_frame_rate; @@ -3300,7 +3273,7 @@ WebRtcVideoChannel::MapCodecs(const std::vector& codecs) { std::vector video_codecs; std::map payload_codec_type; - // |rtx_mapping| maps video payload type to rtx payload type. + // `rtx_mapping` maps video payload type to rtx payload type. std::map rtx_mapping; std::map rtx_time_mapping; @@ -3592,13 +3565,13 @@ EncoderStreamFactory::CreateDefaultVideoStreams( layer.max_framerate = max_framerate; if (encoder_config.simulcast_layers[0].scale_resolution_down_by > 1.) 
{ - layer.width = std::max( - layer.width / - encoder_config.simulcast_layers[0].scale_resolution_down_by, + layer.width = ScaleDownResolution( + layer.width, + encoder_config.simulcast_layers[0].scale_resolution_down_by, kMinLayerSize); - layer.height = std::max( - layer.height / - encoder_config.simulcast_layers[0].scale_resolution_down_by, + layer.height = ScaleDownResolution( + layer.height, + encoder_config.simulcast_layers[0].scale_resolution_down_by, kMinLayerSize); } @@ -3674,16 +3647,19 @@ EncoderStreamFactory::CreateSimulcastOrConferenceModeScreenshareStreams( const bool norm_size_configured = webrtc::NormalizeSimulcastSizeExperiment::GetBase2Exponent().has_value(); const int normalized_width = - (default_scale_factors_used || norm_size_configured) + (default_scale_factors_used || norm_size_configured) && + (width >= kMinLayerSize) ? NormalizeSimulcastSize(width, encoder_config.number_of_streams) : width; const int normalized_height = - (default_scale_factors_used || norm_size_configured) + (default_scale_factors_used || norm_size_configured) && + (height >= kMinLayerSize) ? NormalizeSimulcastSize(height, encoder_config.number_of_streams) : height; - for (size_t i = 0; i < layers.size(); ++i) { layers[i].active = encoder_config.simulcast_layers[i].active; + layers[i].scalability_mode = + encoder_config.simulcast_layers[i].scalability_mode; // Update with configured num temporal layers if supported by codec. if (encoder_config.simulcast_layers[i].num_temporal_layers && IsTemporalLayersSupported(codec_name_)) { @@ -3697,12 +3673,10 @@ EncoderStreamFactory::CreateSimulcastOrConferenceModeScreenshareStreams( if (has_scale_resolution_down_by) { const double scale_resolution_down_by = std::max( encoder_config.simulcast_layers[i].scale_resolution_down_by, 1.0); - layers[i].width = std::max( - static_cast(normalized_width / scale_resolution_down_by), - kMinLayerSize); - layers[i].height = std::max( - static_cast(normalized_height / scale_resolution_down_by), - kMinLayerSize); + layers[i].width = ScaleDownResolution( + normalized_width, scale_resolution_down_by, kMinLayerSize); + layers[i].height = ScaleDownResolution( + normalized_height, scale_resolution_down_by, kMinLayerSize); } // Update simulcast bitrates with configured min and max bitrate. if (encoder_config.simulcast_layers[i].min_bitrate_bps > 0) { diff --git a/media/engine/webrtc_video_engine.h b/media/engine/webrtc_video_engine.h index a67a010ed7..940985d9f8 100644 --- a/media/engine/webrtc_video_engine.h +++ b/media/engine/webrtc_video_engine.h @@ -167,7 +167,7 @@ class WebRtcVideoChannel : public VideoMediaChannel, int64_t packet_time_us) override; void OnPacketSent(const rtc::SentPacket& sent_packet) override; void OnReadyToSend(bool ready) override; - void OnNetworkRouteChanged(const std::string& transport_name, + void OnNetworkRouteChanged(absl::string_view transport_name, const rtc::NetworkRoute& network_route) override; void SetInterface(NetworkInterface* iface) override; @@ -218,17 +218,14 @@ class WebRtcVideoChannel : public VideoMediaChannel, std::vector GetSources(uint32_t ssrc) const override; - // Take the buffered packets for |ssrcs| and feed them into DeliverPacket. + // Take the buffered packets for `ssrcs` and feed them into DeliverPacket. // This method does nothing unless unknown_ssrc_packet_buffer_ is configured. void BackfillBufferedPackets(rtc::ArrayView ssrcs); // Implements webrtc::EncoderSwitchRequestCallback. 
void RequestEncoderFallback() override; - - // TODO(bugs.webrtc.org/11341) : Remove this version of RequestEncoderSwitch. - void RequestEncoderSwitch( - const EncoderSwitchRequestCallback::Config& conf) override; - void RequestEncoderSwitch(const webrtc::SdpVideoFormat& format) override; + void RequestEncoderSwitch(const webrtc::SdpVideoFormat& format, + bool allow_default_fallback) override; void SetRecordableEncodedFrameCallback( uint32_t ssrc, @@ -258,12 +255,12 @@ class WebRtcVideoChannel : public VideoMediaChannel, VideoCodecSettings(); // Checks if all members of |*this| are equal to the corresponding members - // of |other|. + // of `other`. bool operator==(const VideoCodecSettings& other) const; bool operator!=(const VideoCodecSettings& other) const; - // Checks if all members of |a|, except |flexfec_payload_type|, are equal - // to the corresponding members of |b|. + // Checks if all members of `a`, except `flexfec_payload_type`, are equal + // to the corresponding members of `b`. static bool EqualsDisregardingFlexfec(const VideoCodecSettings& a, const VideoCodecSettings& b); @@ -290,7 +287,7 @@ class WebRtcVideoChannel : public VideoMediaChannel, // These optionals are unset if not changed. absl::optional> codec_settings; absl::optional> rtp_header_extensions; - // Keep track of the FlexFEC payload type separately from |codec_settings|. + // Keep track of the FlexFEC payload type separately from `codec_settings`. // This allows us to recreate the FlexfecReceiveStream separately from the // VideoReceiveStream when the FlexFEC payload type is changed. absl::optional flexfec_payload_type; @@ -389,8 +386,8 @@ class WebRtcVideoChannel : public VideoMediaChannel, const VideoCodec& codec) const; void ReconfigureEncoder(); - // Calls Start or Stop according to whether or not |sending_| is true, - // and whether or not the encoding in |rtp_parameters_| is active. + // Calls Start or Stop according to whether or not `sending_` is true, + // and whether or not the encoding in `rtp_parameters_` is active. void UpdateSendState(); webrtc::DegradationPreference GetDegradationPreference() const @@ -494,7 +491,7 @@ class WebRtcVideoChannel : public VideoMediaChannel, webrtc::Call* const call_; const StreamParams stream_params_; - // Both |stream_| and |flexfec_stream_| are managed by |this|. They are + // Both `stream_` and `flexfec_stream_` are managed by `this`. They are // destroyed by calling call_->DestroyVideoReceiveStream and // call_->DestroyFlexfecReceiveStream, respectively. webrtc::VideoReceiveStream* stream_; @@ -577,8 +574,8 @@ class WebRtcVideoChannel : public VideoMediaChannel, // criteria because the streams live on the worker thread and the demuxer // lives on the network thread. Because packets are posted from the network // thread to the worker thread, they can still be in-flight when streams are - // reconfgured. This can happen when |demuxer_criteria_id_| and - // |demuxer_criteria_completed_id_| don't match. During this time, we do not + // reconfigured. This can happen when `demuxer_criteria_id_` and + // `demuxer_criteria_completed_id_` don't match. During this time, we do not // want to create unsignalled receive streams and should instead drop the // packets.
   E.g:
   // * If RemoveRecvStream(old_ssrc) was recently called, there may be packets
@@ -598,7 +595,7 @@ class WebRtcVideoChannel : public VideoMediaChannel,
   std::vector<VideoCodecSettings> negotiated_codecs_
       RTC_GUARDED_BY(thread_checker_);

-  absl::optional<std::vector<webrtc::RtpExtension>> send_rtp_extensions_
+  std::vector<webrtc::RtpExtension> send_rtp_extensions_
       RTC_GUARDED_BY(thread_checker_);

   webrtc::VideoEncoderFactory* const encoder_factory_
@@ -638,9 +635,10 @@ class WebRtcVideoChannel : public VideoMediaChannel,
   std::unique_ptr<UnhandledPacketsBuffer> unknown_ssrc_packet_buffer_
       RTC_GUARDED_BY(thread_checker_);

+  // TODO(bugs.webrtc.org/11341): Remove this and relevant PC API. Presence
+  // of multiple negotiated codecs allows generic encoder fallback on failures.
+  // Presence of EncoderSelector allows switching to specific encoders.
   bool allow_codec_switching_ = false;
-  absl::optional<EncoderSwitchRequestCallback::Config>
-      requested_encoder_switch_;
 };

 class EncoderStreamFactory
diff --git a/media/engine/webrtc_video_engine_unittest.cc b/media/engine/webrtc_video_engine_unittest.cc
index d0745e35f5..a1aa88de7b 100644
--- a/media/engine/webrtc_video_engine_unittest.cc
+++ b/media/engine/webrtc_video_engine_unittest.cc
@@ -30,6 +30,7 @@
 #include "api/test/video/function_video_decoder_factory.h"
 #include "api/transport/field_trial_based_config.h"
 #include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
 #include "api/video/builtin_video_bitrate_allocator_factory.h"
 #include "api/video/i420_buffer.h"
 #include "api/video/video_bitrate_allocation.h"
@@ -64,6 +65,7 @@
 #include "test/field_trial.h"
 #include "test/frame_forwarder.h"
 #include "test/gmock.h"
+#include "test/time_controller/simulated_time_controller.h"

 using ::testing::_;
 using ::testing::Contains;
@@ -77,8 +79,9 @@ using ::testing::Return;
 using ::testing::SizeIs;
 using ::testing::StrNe;
 using ::testing::Values;
-using webrtc::BitrateConstraints;
-using webrtc::RtpExtension;
+using ::webrtc::BitrateConstraints;
+using ::webrtc::RtpExtension;
+using ::webrtc::RtpPacket;

 namespace {
 static const int kDefaultQpMax = 56;
@@ -126,8 +129,8 @@ void VerifyCodecHasDefaultFeedbackParams(const cricket::VideoCodec& codec,
                                          cricket::kRtcpFbParamCcm,
                                          cricket::kRtcpFbCcmParamFir)));
 }

-// Return true if any codec in |codecs| is an RTX codec with associated payload
-// type |payload_type|.
+// Return true if any codec in `codecs` is an RTX codec with associated payload
+// type `payload_type`.
 bool HasRtxCodec(const std::vector<cricket::VideoCodec>& codecs,
                  int payload_type) {
   for (const cricket::VideoCodec& codec : codecs) {
@@ -241,12 +244,13 @@ class WebRtcVideoEngineTest : public ::testing::Test {
  public:
  WebRtcVideoEngineTest() : WebRtcVideoEngineTest("") {}
  explicit WebRtcVideoEngineTest(const std::string& field_trials)
-      : override_field_trials_(
+      : time_controller_(webrtc::Timestamp::Millis(4711)),
+        override_field_trials_(
             field_trials.empty()
                 ? nullptr
                 : std::make_unique<webrtc::test::ScopedFieldTrials>(
                       field_trials)),
-        task_queue_factory_(webrtc::CreateDefaultTaskQueueFactory()),
+        task_queue_factory_(time_controller_.CreateTaskQueueFactory()),
         call_(webrtc::Call::Create([&] {
           webrtc::Call::Config call_config(&event_log_);
           call_config.task_queue_factory = task_queue_factory_.get();
@@ -261,11 +265,7 @@ class WebRtcVideoEngineTest : public ::testing::Test {
                 encoder_factory_),
             std::unique_ptr<cricket::FakeWebRtcVideoDecoderFactory>(
                 decoder_factory_),
-            field_trials_) {
-    // Ensure fake clock doesn't return 0, which will cause some
-    // initializations fail inside RTP senders.
-    fake_clock_.AdvanceTime(webrtc::TimeDelta::Micros(1));
-  }
+            field_trials_) {}

 protected:
  void AssignDefaultAptRtxTypes();
@@ -286,9 +286,7 @@ class WebRtcVideoEngineTest : public ::testing::Test {
  void ExpectRtpCapabilitySupport(const char* uri, bool supported) const;

-  // Has to be the first one, so it is initialized before the call or there is a
-  // race condition in the clock access.
-  rtc::ScopedFakeClock fake_clock_;
+  webrtc::GlobalSimulatedTimeController time_controller_;
   std::unique_ptr<webrtc::test::ScopedFieldTrials> override_field_trials_;
   webrtc::FieldTrialBasedConfig field_trials_;
   webrtc::RtcEventLogNull event_log_;
@@ -559,6 +557,7 @@ TEST_F(WebRtcVideoEngineTest, UseFactoryForVp8WhenSupported) {
                                         rtc::kNumMicrosecsPerSec / 30);
   EXPECT_TRUE(channel->SetVideoSend(kSsrc, nullptr, &frame_forwarder));
   frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
   // Sending one frame will allocate the encoder.
   ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(1));
   EXPECT_TRUE_WAIT(encoder_factory_->encoders()[0]->GetNumEncodedFrames() > 0,
@@ -809,7 +808,7 @@ TEST_F(WebRtcVideoEngineTest, UsesSimulcastAdapterForVp8Factories) {
                                         rtc::kNumMicrosecsPerSec / 60);
   EXPECT_TRUE(channel->SetVideoSend(ssrcs.front(), nullptr, &frame_forwarder));
   frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
-
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());
   ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(2));

   // Verify that encoders are configured for simulcast through adapter
@@ -851,6 +850,7 @@ TEST_F(WebRtcVideoEngineTest, ChannelWithH264CanChangeToVp8) {
   EXPECT_TRUE(channel->SetVideoSend(kSsrc, nullptr, &frame_forwarder));
   // Sending one frame will allocate the encoder.
   frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
+  time_controller_.AdvanceTime(webrtc::TimeDelta::Zero());

   ASSERT_EQ_WAIT(1u, encoder_factory_->encoders().size(), kTimeout);
@@ -860,6 +860,7 @@ TEST_F(WebRtcVideoEngineTest, ChannelWithH264CanChangeToVp8) {
   // Sending one frame will switch encoder.
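  // (Editor's note: the time_controller_.AdvanceTime(webrtc::TimeDelta::Zero())
  // calls added throughout these tests run any tasks already posted to the
  // simulated task queues; this replaces the real-time waits that the removed
  // rtc::ScopedFakeClock setup relied on.)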
frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame()); + time_controller_.AdvanceTime(webrtc::TimeDelta::Zero()); EXPECT_EQ_WAIT(1u, encoder_factory_->encoders().size(), kTimeout); } @@ -889,6 +890,7 @@ TEST_F(WebRtcVideoEngineTest, rtc::kNumMicrosecsPerSec / 30); EXPECT_TRUE(channel->SetVideoSend(ssrcs.front(), nullptr, &frame_forwarder)); frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame()); + time_controller_.AdvanceTime(webrtc::TimeDelta::Zero()); ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(2)); ASSERT_TRUE(encoder_factory_->encoders()[0]->WaitForInitEncode()); @@ -921,6 +923,7 @@ TEST_F(WebRtcVideoEngineTest, rtc::kNumMicrosecsPerSec / 30); EXPECT_TRUE(channel->SetVideoSend(kSsrc, nullptr, &frame_forwarder)); frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame()); + time_controller_.AdvanceTime(webrtc::TimeDelta::Zero()); ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(1)); ASSERT_EQ(1u, encoder_factory_->encoders().size()); ASSERT_TRUE(encoder_factory_->encoders()[0]->WaitForInitEncode()); @@ -955,6 +958,7 @@ TEST_F(WebRtcVideoEngineTest, SimulcastEnabledForH264BehindFieldTrial) { rtc::kNumMicrosecsPerSec / 30); EXPECT_TRUE(channel->SetVideoSend(ssrcs[0], nullptr, &frame_forwarder)); frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame()); + time_controller_.AdvanceTime(webrtc::TimeDelta::Zero()); ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(1)); ASSERT_EQ(1u, encoder_factory_->encoders().size()); @@ -1101,7 +1105,7 @@ TEST_F(WebRtcVideoEngineTest, RegisterH264DecoderIfSupported) { // Tests when GetSources is called with non-existing ssrc, it will return an // empty list of RtpSource without crashing. TEST_F(WebRtcVideoEngineTest, GetSourcesWithNonExistingSsrc) { - // Setup an recv stream with |kSsrc|. + // Setup an recv stream with `kSsrc`. AddSupportedVideoCodecType("VP8"); cricket::VideoRecvParameters parameters; parameters.codecs.push_back(GetEngineCodec("VP8")); @@ -1127,7 +1131,7 @@ TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, NullFactories) { } TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, EmptyFactories) { - // |engine| take ownership of the factories. + // `engine` take ownership of the factories. webrtc::MockVideoEncoderFactory* encoder_factory = new webrtc::MockVideoEncoderFactory(); webrtc::MockVideoDecoderFactory* decoder_factory = @@ -1150,7 +1154,7 @@ TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, EmptyFactories) { // from the engine and that we will create a Vp8 encoder and decoder using the // new factories. TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, Vp8) { - // |engine| take ownership of the factories. + // `engine` take ownership of the factories. webrtc::MockVideoEncoderFactory* encoder_factory = new webrtc::MockVideoEncoderFactory(); webrtc::MockVideoDecoderFactory* decoder_factory = @@ -1206,26 +1210,22 @@ TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, Vp8) { VerifyCodecHasDefaultFeedbackParams(engine_codecs.at(0), /*lntf_expected=*/false); - // Mock encoder creation. |engine| take ownership of the encoder. - webrtc::VideoEncoderFactory::CodecInfo codec_info; - codec_info.has_internal_source = false; + // Mock encoder creation. `engine` take ownership of the encoder. 
   const webrtc::SdpVideoFormat format("VP8");
-  EXPECT_CALL(*encoder_factory, QueryVideoEncoder(format))
-      .WillRepeatedly(Return(codec_info));
-  rtc::Event encoder_created;
   EXPECT_CALL(*encoder_factory, CreateVideoEncoder(format)).WillOnce([&] {
-    encoder_created.Set();
     return std::make_unique<cricket::FakeWebRtcVideoEncoder>(nullptr);
   });

-  // Mock decoder creation. |engine| take ownership of the decoder.
+  // Mock decoder creation. `engine` takes ownership of the decoder.
   EXPECT_CALL(*decoder_factory, CreateVideoDecoder(format)).WillOnce([] {
     return std::make_unique<cricket::FakeWebRtcVideoDecoder>(nullptr);
   });

   // Create a call.
   webrtc::RtcEventLogNull event_log;
-  auto task_queue_factory = webrtc::CreateDefaultTaskQueueFactory();
+  webrtc::GlobalSimulatedTimeController time_controller(
+      webrtc::Timestamp::Millis(4711));
+  auto task_queue_factory = time_controller.CreateTaskQueueFactory();
   webrtc::Call::Config call_config(&event_log);
   webrtc::FieldTrialBasedConfig field_trials;
   call_config.trials = &field_trials;
@@ -1252,7 +1252,7 @@ TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, Vp8) {
   EXPECT_TRUE(send_channel->SetVideoSend(send_ssrc, nullptr, &frame_forwarder));
   // Sending one frame will allocate the encoder.
   frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
-  encoder_created.Wait(kTimeout);
+  time_controller.AdvanceTime(webrtc::TimeDelta::Zero());

   // Create recv channel.
   const int recv_ssrc = 321;
@@ -1275,7 +1275,7 @@ TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, Vp8) {
 // Test behavior when decoder factory fails to create a decoder (returns null).
 TEST(WebRtcVideoEngineNewVideoCodecFactoryTest, NullDecoder) {
-  // |engine| take ownership of the factories.
+  // `engine` takes ownership of the factories.
   webrtc::MockVideoEncoderFactory* encoder_factory =
       new webrtc::MockVideoEncoderFactory();
   webrtc::MockVideoDecoderFactory* decoder_factory =
@@ -1372,7 +1372,7 @@ TEST_F(WebRtcVideoEngineTest, DISABLED_RecreatesEncoderOnContentTypeChange) {
   options.video_noise_reduction.emplace(false);
   EXPECT_TRUE(channel->SetVideoSend(kSsrc, &options, &frame_forwarder));
   // Change back to regular video content, update encoder. Also change
-  // a non |is_screencast| option just to verify it doesn't affect recreation.
+  // a non `is_screencast` option just to verify it doesn't affect recreation.
   frame_forwarder.IncomingCapturedFrame(frame_source.GetFrame());
   ASSERT_TRUE(encoder_factory_->WaitForCreatedVideoEncoders(3));
   EXPECT_EQ(webrtc::VideoCodecMode::kRealtimeVideo,
@@ -1425,7 +1425,7 @@ class WebRtcVideoChannelEncodedFrameCallbackTest : public ::testing::Test {
   }

   void DeliverKeyFrame(uint32_t ssrc) {
-    webrtc::RtpPacket packet;
+    RtpPacket packet;
     packet.SetMarker(true);
     packet.SetPayloadType(96);  // VP8
     packet.SetSsrc(ssrc);
@@ -1666,7 +1666,7 @@ class WebRtcVideoChannelBaseTest : public ::testing::Test {
     return network_interface_.GetRtpPacket(index);
   }
   static int GetPayloadType(rtc::CopyOnWriteBuffer p) {
-    webrtc::RtpPacket header;
+    RtpPacket header;
     EXPECT_TRUE(header.Parse(std::move(p)));
     return header.PayloadType();
   }
@@ -1850,10 +1850,11 @@ TEST_F(WebRtcVideoChannelBaseTest, InvalidRecvBufferSize) {
   // This particular hack pauses the transport controller TQ while we
   // change the field trial.
   rtc::TaskQueue* tq = call_->GetTransportControllerSend()->GetWorkerQueue();
-  rtc::Event waiting, resume;
-  tq->PostTask([&waiting, &resume]() {
+  rtc::Event waiting, resume, conclude;
+  tq->PostTask([&waiting, &resume, &conclude]() {
     waiting.Set();
     resume.Wait(rtc::Event::kForever);
+    conclude.Set();
   });
   waiting.Wait(rtc::Event::kForever);
@@ -1862,6 +1863,8 @@ TEST_F(WebRtcVideoChannelBaseTest, InvalidRecvBufferSize) {
   SetUp();
   resume.Set();
+  // Ensure we don't cause a UAF as the test scope exits.
+  conclude.Wait(rtc::Event::kForever);

   // OK, now the test can carry on.
@@ -2060,7 +2063,7 @@ TEST_F(WebRtcVideoChannelBaseTest, SetSendSsrc) {
   EXPECT_TRUE(SetSend(true));
   SendFrame();
   EXPECT_TRUE_WAIT(NumRtpPackets() > 0, kTimeout);
-  webrtc::RtpPacket header;
+  RtpPacket header;
   EXPECT_TRUE(header.Parse(GetRtpPacket(0)));
   EXPECT_EQ(kSsrc, header.Ssrc());
@@ -2084,7 +2087,7 @@ TEST_F(WebRtcVideoChannelBaseTest, SetSendSsrcAfterSetCodecs) {
   EXPECT_TRUE(SetSend(true));
   EXPECT_TRUE(WaitAndSendFrame(0));
   EXPECT_TRUE_WAIT(NumRtpPackets() > 0, kTimeout);
-  webrtc::RtpPacket header;
+  RtpPacket header;
   EXPECT_TRUE(header.Parse(GetRtpPacket(0)));
   EXPECT_EQ(999u, header.Ssrc());
   // Packets are being paced out, so these can mismatch between the first and
@@ -2099,16 +2102,13 @@ TEST_F(WebRtcVideoChannelBaseTest, SetSendSsrcAfterSetCodecs) {
 // Test that we can set the default video renderer before and after
 // media is received.
 TEST_F(WebRtcVideoChannelBaseTest, SetSink) {
-  uint8_t data1[] = {0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
-                     0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-  rtc::CopyOnWriteBuffer packet1(data1, sizeof(data1));
-  rtc::SetBE32(packet1.MutableData() + 8, kSsrc);
+  RtpPacket packet;
+  packet.SetSsrc(kSsrc);
   channel_->SetDefaultSink(NULL);
   EXPECT_TRUE(SetDefaultCodec());
   EXPECT_TRUE(SetSend(true));
   EXPECT_EQ(0, renderer_.num_rendered_frames());
-  channel_->OnPacketReceived(packet1, /* packet_time_us */ -1);
+  channel_->OnPacketReceived(packet.Buffer(), /* packet_time_us */ -1);
   channel_->SetDefaultSink(&renderer_);
   SendFrame();
   EXPECT_FRAME_WAIT(1, kVideoWidth, kVideoHeight, kTimeout);
@@ -2122,7 +2122,7 @@ TEST_F(WebRtcVideoChannelBaseTest, AddRemoveSendStreams) {
   SendFrame();
   EXPECT_FRAME_WAIT(1, kVideoWidth, kVideoHeight, kTimeout);
   EXPECT_GT(NumRtpPackets(), 0);
-  webrtc::RtpPacket header;
+  RtpPacket header;
   size_t last_packet = NumRtpPackets() - 1;
   EXPECT_TRUE(header.Parse(GetRtpPacket(static_cast<int>(last_packet))));
   EXPECT_EQ(kSsrc, header.Ssrc());
@@ -2405,116 +2405,80 @@ TEST_F(WebRtcVideoChannelBaseTest, RequestEncoderFallback) {
   EXPECT_EQ("VP8", codec.name);
 }

-TEST_F(WebRtcVideoChannelBaseTest, RequestEncoderSwitchWithConfig) {
-  const std::string kParam = "the-param";
-  const std::string kPing = "ping";
-  const std::string kPong = "pong";
-
+TEST_F(WebRtcVideoChannelBaseTest, RequestEncoderSwitchDefaultFallback) {
   cricket::VideoSendParameters parameters;
-  VideoCodec vp9 = GetEngineCodec("VP9");
-  vp9.params[kParam] = kPong;
-  parameters.codecs.push_back(vp9);
-
-  VideoCodec vp8 = GetEngineCodec("VP8");
-  vp8.params[kParam] = kPing;
-  parameters.codecs.push_back(vp8);
-
+  parameters.codecs.push_back(GetEngineCodec("VP9"));
+  parameters.codecs.push_back(GetEngineCodec("VP8"));
   EXPECT_TRUE(channel_->SetSendParameters(parameters));
-  channel_->SetVideoCodecSwitchingEnabled(true);

   VideoCodec codec;
   ASSERT_TRUE(channel_->GetSendCodec(&codec));
-  EXPECT_THAT(codec.name, Eq("VP9"));
+  EXPECT_EQ("VP9", codec.name);

   // RequestEncoderSwitch will post a task to the worker thread (which is
also // the current thread), hence the ProcessMessages call. - webrtc::EncoderSwitchRequestCallback::Config conf1{"VP8", kParam, kPing}; - channel_->RequestEncoderSwitch(conf1); + channel_->RequestEncoderSwitch(webrtc::SdpVideoFormat("UnavailableCodec"), + /*allow_default_fallback=*/true); rtc::Thread::Current()->ProcessMessages(30); - ASSERT_TRUE(channel_->GetSendCodec(&codec)); - EXPECT_THAT(codec.name, Eq("VP8")); - EXPECT_THAT(codec.params, Contains(Pair(kParam, kPing))); - webrtc::EncoderSwitchRequestCallback::Config conf2{"VP9", kParam, kPong}; - channel_->RequestEncoderSwitch(conf2); - rtc::Thread::Current()->ProcessMessages(30); + // Requested encoder is not available. Default fallback is allowed. Switch to + // the next negotiated codec, VP8. ASSERT_TRUE(channel_->GetSendCodec(&codec)); - EXPECT_THAT(codec.name, Eq("VP9")); - EXPECT_THAT(codec.params, Contains(Pair(kParam, kPong))); + EXPECT_EQ("VP8", codec.name); } -TEST_F(WebRtcVideoChannelBaseTest, RequestEncoderSwitchIncorrectParam) { - const std::string kParam = "the-param"; - const std::string kPing = "ping"; - const std::string kPong = "pong"; +TEST_F(WebRtcVideoChannelBaseTest, RequestEncoderSwitchStrictPreference) { + VideoCodec vp9 = GetEngineCodec("VP9"); + vp9.params["profile-id"] = "0"; cricket::VideoSendParameters parameters; - VideoCodec vp9 = GetEngineCodec("VP9"); - vp9.params[kParam] = kPong; + parameters.codecs.push_back(GetEngineCodec("VP8")); parameters.codecs.push_back(vp9); - - VideoCodec vp8 = GetEngineCodec("VP8"); - vp8.params[kParam] = kPing; - parameters.codecs.push_back(vp8); - EXPECT_TRUE(channel_->SetSendParameters(parameters)); - channel_->SetVideoCodecSwitchingEnabled(true); VideoCodec codec; ASSERT_TRUE(channel_->GetSendCodec(&codec)); - EXPECT_THAT(codec.name, Eq("VP9")); + EXPECT_EQ("VP8", codec.name); - // RequestEncoderSwitch will post a task to the worker thread (which is also - // the current thread), hence the ProcessMessages call. - webrtc::EncoderSwitchRequestCallback::Config conf1{"VP8", kParam, kPing}; - channel_->RequestEncoderSwitch(conf1); + channel_->RequestEncoderSwitch( + webrtc::SdpVideoFormat("VP9", {{"profile-id", "1"}}), + /*allow_default_fallback=*/false); rtc::Thread::Current()->ProcessMessages(30); + + // VP9 profile_id=1 is not available. Default fallback is not allowed. Switch + // is not performed. ASSERT_TRUE(channel_->GetSendCodec(&codec)); - EXPECT_THAT(codec.name, Eq("VP8")); - EXPECT_THAT(codec.params, Contains(Pair(kParam, kPing))); + EXPECT_EQ("VP8", codec.name); - // Incorrect conf2.value, expect no codec switch. - webrtc::EncoderSwitchRequestCallback::Config conf2{"VP9", kParam, kPing}; - channel_->RequestEncoderSwitch(conf2); + channel_->RequestEncoderSwitch( + webrtc::SdpVideoFormat("VP9", {{"profile-id", "0"}}), + /*allow_default_fallback=*/false); rtc::Thread::Current()->ProcessMessages(30); + + // VP9 profile_id=0 is available. Switch encoder. 
ASSERT_TRUE(channel_->GetSendCodec(&codec)); - EXPECT_THAT(codec.name, Eq("VP8")); - EXPECT_THAT(codec.params, Contains(Pair(kParam, kPing))); + EXPECT_EQ("VP9", codec.name); } -TEST_F(WebRtcVideoChannelBaseTest, - RequestEncoderSwitchWithConfigBeforeEnabling) { - const std::string kParam = "the-param"; - const std::string kPing = "ping"; - const std::string kPong = "pong"; - +TEST_F(WebRtcVideoChannelBaseTest, SendCodecIsMovedToFrontInRtpParameters) { cricket::VideoSendParameters parameters; - VideoCodec vp9 = GetEngineCodec("VP9"); - vp9.params[kParam] = kPong; - parameters.codecs.push_back(vp9); - - VideoCodec vp8 = GetEngineCodec("VP8"); - vp8.params[kParam] = kPing; - parameters.codecs.push_back(vp8); - + parameters.codecs.push_back(GetEngineCodec("VP9")); + parameters.codecs.push_back(GetEngineCodec("VP8")); EXPECT_TRUE(channel_->SetSendParameters(parameters)); - - VideoCodec codec; - ASSERT_TRUE(channel_->GetSendCodec(&codec)); - EXPECT_THAT(codec.name, Eq("VP9")); - - webrtc::EncoderSwitchRequestCallback::Config conf{"VP8", kParam, kPing}; - channel_->RequestEncoderSwitch(conf); - - // Enable codec switching after it has been requested. channel_->SetVideoCodecSwitchingEnabled(true); - // RequestEncoderSwitch will post a task to the worker thread (which is also + auto send_codecs = channel_->GetRtpSendParameters(kSsrc).codecs; + ASSERT_EQ(send_codecs.size(), 2u); + EXPECT_THAT("VP9", send_codecs[0].name); + + // RequestEncoderFallback will post a task to the worker thread (which is also // the current thread), hence the ProcessMessages call. + channel_->RequestEncoderFallback(); rtc::Thread::Current()->ProcessMessages(30); - ASSERT_TRUE(channel_->GetSendCodec(&codec)); - EXPECT_THAT(codec.name, Eq("VP8")); - EXPECT_THAT(codec.params, Contains(Pair(kParam, kPing))); + + send_codecs = channel_->GetRtpSendParameters(kSsrc).codecs; + ASSERT_EQ(send_codecs.size(), 2u); + EXPECT_THAT("VP8", send_codecs[0].name); } #endif // defined(RTC_ENABLE_VP9) @@ -2572,7 +2536,7 @@ class WebRtcVideoChannelTest : public WebRtcVideoEngineTest { int64_t packet_time_us) { channel_->OnPacketReceived(packet, packet_time_us); rtc::Thread::Current()->ProcessMessages(0); - fake_clock_.AdvanceTime( + time_controller_.AdvanceTime( webrtc::TimeDelta::Millis(kUnsignalledReceiveStreamCooldownMs)); } @@ -3073,11 +3037,11 @@ TEST_F(WebRtcVideoChannelTest, IdenticalRecvExtensionsDoesntRecreateStream) { EXPECT_EQ(1, fake_call_->GetNumCreatedReceiveStreams()); - // Setting different extensions should recreate the stream. + // Setting different extensions should not require the stream to be recreated. recv_parameters_.extensions.resize(1); EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters_)); - EXPECT_EQ(2, fake_call_->GetNumCreatedReceiveStreams()); + EXPECT_EQ(1, fake_call_->GetNumCreatedReceiveStreams()); } TEST_F(WebRtcVideoChannelTest, @@ -3539,16 +3503,16 @@ TEST_F(WebRtcVideoChannelTest, VerifyVp8SpecificSettings) { EXPECT_EQ(3u, stream->GetVideoStreams().size()); ASSERT_TRUE(stream->GetVp8Settings(&vp8_settings)) << "No VP8 config set."; EXPECT_FALSE(vp8_settings.denoisingOn); - // Resizing and frame dropping always off for screen sharing. + // Resizing always off for screen sharing. 
EXPECT_FALSE(vp8_settings.automaticResizeOn); - EXPECT_FALSE(vp8_settings.frameDroppingOn); + EXPECT_TRUE(vp8_settings.frameDroppingOn); stream = SetDenoisingOption(last_ssrc_, &frame_forwarder, true); ASSERT_TRUE(stream->GetVp8Settings(&vp8_settings)) << "No VP8 config set."; EXPECT_FALSE(vp8_settings.denoisingOn); EXPECT_FALSE(vp8_settings.automaticResizeOn); - EXPECT_FALSE(vp8_settings.frameDroppingOn); + EXPECT_TRUE(vp8_settings.frameDroppingOn); EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, nullptr, nullptr)); } @@ -3575,7 +3539,7 @@ TEST_F(WebRtcVideoChannelTest, SetIdenticalOptionsDoesntReconfigureEncoder) { EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder)); EXPECT_EQ(1, send_stream->num_encoder_reconfigurations()); - // Change |options| and expect 2 reconfigurations. + // Change `options` and expect 2 reconfigurations. options.video_noise_reduction = true; EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, &options, &frame_forwarder)); EXPECT_EQ(2, send_stream->num_encoder_reconfigurations()); @@ -3793,14 +3757,6 @@ INSTANTIATE_TEST_SUITE_P( Vp9SettingsTestWithFieldTrial, Values( std::make_tuple("", 1, 1, webrtc::InterLayerPredMode::kOnKeyPic), - std::make_tuple("WebRTC-SupportVP9SVC/Default/", - 1, - 1, - webrtc::InterLayerPredMode::kOnKeyPic), - std::make_tuple("WebRTC-SupportVP9SVC/EnabledByFlag_2SL3TL/", - 2, - 3, - webrtc::InterLayerPredMode::kOnKeyPic), std::make_tuple("WebRTC-Vp9InterLayerPred/Default/", 1, 1, @@ -4024,7 +3980,7 @@ TEST_F(WebRtcVideoChannelTest, EstimatesNtpStartTimeCorrectly) { // This timestamp is kInitialTimestamp (-1) + kFrameOffsetMs * 90, which // triggers a constant-overflow warning, hence we're calculating it explicitly // here. - fake_clock_.AdvanceTime(webrtc::TimeDelta::Millis(kFrameOffsetMs)); + time_controller_.AdvanceTime(webrtc::TimeDelta::Millis(kFrameOffsetMs)); video_frame.set_timestamp(kFrameOffsetMs * 90 - 1); video_frame.set_ntp_time_ms(kInitialNtpTimeMs + kFrameOffsetMs); stream->InjectFrame(video_frame); @@ -4369,7 +4325,7 @@ TEST_F(WebRtcVideoChannelFlexfecRecvTest, SetRecvCodecsWithFec) { EXPECT_EQ(video_stream_config.rtp.rtcp_mode, flexfec_stream_config.rtcp_mode); EXPECT_EQ(video_stream_config.rtcp_send_transport, flexfec_stream_config.rtcp_send_transport); - // TODO(brandtr): Update this EXPECT when we set |transport_cc| in a + // TODO(brandtr): Update this EXPECT when we set `transport_cc` in a // spec-compliant way. 
   EXPECT_EQ(video_stream_config.rtp.transport_cc,
             flexfec_stream_config.rtp.transport_cc);
@@ -5486,7 +5442,7 @@ TEST_F(WebRtcVideoChannelTest, GetStatsReportsPerLayerQpSum) {
 webrtc::VideoSendStream::Stats GetInitialisedStats() {
   webrtc::VideoSendStream::Stats stats;
   stats.encoder_implementation_name = "vp";
-  stats.input_frame_rate = 1;
+  stats.input_frame_rate = 1.0;
   stats.encode_frame_rate = 2;
   stats.avg_encode_time_ms = 3;
   stats.encode_usage_percent = 4;
@@ -6301,12 +6257,9 @@ TEST_F(WebRtcVideoChannelTest, DefaultReceiveStreamReconfiguresToUseRtx) {
   const std::vector<uint32_t> rtx_ssrcs = MAKE_VECTOR(kRtxSsrcs1);
   ASSERT_EQ(0u, fake_call_->GetVideoReceiveStreams().size());
-  const size_t kDataLength = 12;
-  uint8_t data[kDataLength];
-  memset(data, 0, sizeof(data));
-  rtc::SetBE32(&data[8], ssrcs[0]);
-  rtc::CopyOnWriteBuffer packet(data, kDataLength);
-  ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1);
+  RtpPacket packet;
+  packet.SetSsrc(ssrcs[0]);
+  ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1);

   ASSERT_EQ(1u, fake_call_->GetVideoReceiveStreams().size())
       << "No default receive stream created.";
@@ -6462,12 +6415,9 @@ TEST_F(WebRtcVideoChannelTest, RecvUnsignaledSsrcWithSignaledStreamId) {
   EXPECT_EQ(0u, fake_call_->GetVideoReceiveStreams().size());

   // Create and deliver packet.
-  const size_t kDataLength = 12;
-  uint8_t data[kDataLength];
-  memset(data, 0, sizeof(data));
-  rtc::SetBE32(&data[8], kIncomingUnsignalledSsrc);
-  rtc::CopyOnWriteBuffer packet(data, kDataLength);
-  ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1);
+  RtpPacket packet;
+  packet.SetSsrc(kIncomingUnsignalledSsrc);
+  ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1);

   // The stream should now be created with the appropriate sync label.
   EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
@@ -6482,14 +6432,14 @@ TEST_F(WebRtcVideoChannelTest, RecvUnsignaledSsrcWithSignaledStreamId) {
   // Until the demuxer criteria has been updated, we ignore in-flight ssrcs of
   // the recently removed unsignaled receive stream.
-  ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1);
+  ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1);
   EXPECT_EQ(0u, fake_call_->GetVideoReceiveStreams().size());

   // After the demuxer criteria has been updated, we should proceed to create
   // unsignalled receive streams. This time when a default video receive stream
   // is created it won't have a sync_group.
   channel_->OnDemuxerCriteriaUpdateComplete();
-  ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1);
+  ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1);
   EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
   EXPECT_TRUE(
       fake_call_->GetVideoReceiveStreams()[0]->GetConfig().sync_group.empty());
@@ -6501,12 +6451,9 @@ TEST_F(WebRtcVideoChannelTest,
   EXPECT_TRUE(fake_call_->GetVideoReceiveStreams().empty());

   // Packet with unsignaled SSRC is received.
-  const size_t kDataLength = 12;
-  uint8_t data[kDataLength];
-  memset(data, 0, sizeof(data));
-  rtc::SetBE32(&data[8], kIncomingUnsignalledSsrc);
-  rtc::CopyOnWriteBuffer packet(data, kDataLength);
-  ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1);
+  RtpPacket packet;
+  packet.SetSsrc(kIncomingUnsignalledSsrc);
+  ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1);

   // Default receive stream created.
   const auto& receivers1 = fake_call_->GetVideoReceiveStreams();
@@ -6551,21 +6498,15 @@ TEST_F(WebRtcVideoChannelTest,
   // the demuxer is updated.
{ // Receive a packet for kSsrc1. - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc1); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc1); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } { // Receive a packet for kSsrc2. - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc2); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc2); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } // No unsignaled ssrc for kSsrc2 should have been created, but kSsrc1 should @@ -6583,21 +6524,15 @@ TEST_F(WebRtcVideoChannelTest, // Receive packets for kSsrc1 and kSsrc2 again. { // Receive a packet for kSsrc1. - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc1); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc1); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } { // Receive a packet for kSsrc2. - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc2); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc2); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } // An unsignalled ssrc for kSsrc2 should be created and the packet counter @@ -6634,21 +6569,15 @@ TEST_F(WebRtcVideoChannelTest, // the demuxer is updated. { // Receive a packet for kSsrc1. - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc1); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc1); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } { // Receive a packet for kSsrc2. - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc2); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc2); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } // No unsignaled ssrc for kSsrc1 should have been created, but the packet @@ -6665,21 +6594,15 @@ TEST_F(WebRtcVideoChannelTest, // Receive packets for kSsrc1 and kSsrc2 again. { // Receive a packet for kSsrc1. - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc1); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc1); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } { // Receive a packet for kSsrc2. 
- const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc2); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc2); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } // An unsignalled ssrc for kSsrc1 should be created and the packet counter @@ -6712,12 +6635,9 @@ TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) { // In-flight packets should arrive because the stream was recreated, even // though demuxer criteria updates are pending... { - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 1u); @@ -6728,12 +6648,9 @@ TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) { // This still should not prevent in-flight packets from arriving because we // have a receive stream for it. { - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 2u); @@ -6745,12 +6662,9 @@ TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) { // Now the packet should be dropped and not create an unsignalled receive // stream. { - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 0u); EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 2u); @@ -6762,12 +6676,9 @@ TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) { // The packets should continue to be dropped because removal happened after // the most recently completed demuxer update. { - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 0u); EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 2u); @@ -6779,12 +6690,9 @@ TEST_F(WebRtcVideoChannelTest, MultiplePendingDemuxerCriteriaUpdates) { // If packets still arrive after the demuxer knows about the latest removal we // should finally create an unsignalled receive stream. 
{ - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); } EXPECT_EQ(fake_call_->GetVideoReceiveStreams().size(), 1u); EXPECT_EQ(fake_call_->GetDeliveredPacketsForSsrc(kSsrc), 3u); @@ -6797,15 +6705,12 @@ TEST_F(WebRtcVideoChannelTest, UnsignalledSsrcHasACooldown) { // Send packets for kSsrc1, creating an unsignalled receive stream. { // Receive a packet for kSsrc1. - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc1); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc1); + channel_->OnPacketReceived(packet.Buffer(), /* packet_time_us */ -1); } rtc::Thread::Current()->ProcessMessages(0); - fake_clock_.AdvanceTime( + time_controller_.AdvanceTime( webrtc::TimeDelta::Millis(kUnsignalledReceiveStreamCooldownMs - 1)); // We now have an unsignalled receive stream for kSsrc1. @@ -6815,12 +6720,9 @@ TEST_F(WebRtcVideoChannelTest, UnsignalledSsrcHasACooldown) { { // Receive a packet for kSsrc2. - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc2); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc2); + channel_->OnPacketReceived(packet.Buffer(), /* packet_time_us */ -1); } rtc::Thread::Current()->ProcessMessages(0); @@ -6832,15 +6734,12 @@ TEST_F(WebRtcVideoChannelTest, UnsignalledSsrcHasACooldown) { // After 500 ms, kSsrc2 should trigger a new unsignalled receive stream that // replaces the old one. - fake_clock_.AdvanceTime(webrtc::TimeDelta::Millis(1)); + time_controller_.AdvanceTime(webrtc::TimeDelta::Millis(1)); { // Receive a packet for kSsrc2. - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kSsrc2); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - channel_->OnPacketReceived(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kSsrc2); + channel_->OnPacketReceived(packet.Buffer(), /* packet_time_us */ -1); } rtc::Thread::Current()->ProcessMessages(0); @@ -6880,12 +6779,9 @@ TEST_F(WebRtcVideoChannelTest, BaseMinimumPlayoutDelayMsUnsignaledRecvStream) { // Spawn an unsignaled stream by sending a packet, it should inherit // default delay 200. 
- const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - rtc::SetBE32(&data[8], kIncomingUnsignalledSsrc); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetSsrc(kIncomingUnsignalledSsrc); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); recv_stream = fake_call_->GetVideoReceiveStream(kIncomingUnsignalledSsrc); EXPECT_EQ(recv_stream->base_mininum_playout_delay_ms(), 200); @@ -6915,14 +6811,10 @@ void WebRtcVideoChannelTest::TestReceiveUnsignaledSsrcPacket( EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters_)); ASSERT_EQ(0u, fake_call_->GetVideoReceiveStreams().size()); - const size_t kDataLength = 12; - uint8_t data[kDataLength]; - memset(data, 0, sizeof(data)); - - rtc::Set8(data, 1, payload_type); - rtc::SetBE32(&data[8], kIncomingUnsignalledSsrc); - rtc::CopyOnWriteBuffer packet(data, kDataLength); - ReceivePacketAndAdvanceTime(packet, /* packet_time_us */ -1); + RtpPacket packet; + packet.SetPayloadType(payload_type); + packet.SetSsrc(kIncomingUnsignalledSsrc); + ReceivePacketAndAdvanceTime(packet.Buffer(), /* packet_time_us */ -1); if (expect_created_receive_stream) { EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size()) @@ -7002,7 +6894,7 @@ TEST_F(WebRtcVideoChannelTest, ReceiveDifferentUnsignaledSsrc) { channel_->SetDefaultSink(&renderer); // Receive VP8 packet on first SSRC. - webrtc::RtpPacket rtp_packet; + RtpPacket rtp_packet; rtp_packet.SetPayloadType(GetEngineCodec("VP8").id); rtp_packet.SetSsrc(kIncomingUnsignalledSsrc + 1); ReceivePacketAndAdvanceTime(rtp_packet.Buffer(), /* packet_time_us */ -1); @@ -7076,7 +6968,7 @@ TEST_F(WebRtcVideoChannelTest, EXPECT_EQ(0u, fake_call_->GetVideoReceiveStreams().size()); // Receive packet on an unsignaled SSRC. - webrtc::RtpPacket rtp_packet; + RtpPacket rtp_packet; rtp_packet.SetPayloadType(GetEngineCodec("VP8").id); rtp_packet.SetSsrc(kSsrcs3[0]); ReceivePacketAndAdvanceTime(rtp_packet.Buffer(), /* packet_time_us */ -1); @@ -7542,7 +7434,7 @@ TEST_F(WebRtcVideoChannelTest, &frame_forwarder)); channel_->SetSend(true); - // Set |scale_resolution_down_by|'s. + // Set `scale_resolution_down_by`'s. auto rtp_parameters = channel_->GetRtpSendParameters(last_ssrc_); ASSERT_EQ(rtp_parameters.encodings.size(), 3u); rtp_parameters.encodings[0].scale_resolution_down_by = 1.0; @@ -7698,7 +7590,7 @@ TEST_F(WebRtcVideoChannelTest, &frame_forwarder)); channel_->SetSend(true); - // Set |scale_resolution_down_by|'s. + // Set `scale_resolution_down_by`'s. auto rtp_parameters = channel_->GetRtpSendParameters(last_ssrc_); ASSERT_EQ(rtp_parameters.encodings.size(), 3u); rtp_parameters.encodings[0].scale_resolution_down_by = 1.0; @@ -7772,28 +7664,6 @@ TEST_F(WebRtcVideoChannelTest, channel_->SetRtpSendParameters(last_ssrc_, parameters).type()); } -TEST_F(WebRtcVideoChannelTest, - SetRtpSendParametersNumTemporalLayersFailsForInvalidModification) { - const size_t kNumSimulcastStreams = 3; - SetUpSimulcast(true, false); - - // Get and set the rtp encoding parameters. - webrtc::RtpParameters parameters = channel_->GetRtpSendParameters(last_ssrc_); - EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size()); - - // No/all layers should be set. - parameters.encodings[0].num_temporal_layers = 1; - EXPECT_EQ(webrtc::RTCErrorType::INVALID_MODIFICATION, - channel_->SetRtpSendParameters(last_ssrc_, parameters).type()); - - // Different values not supported. 
- parameters.encodings[0].num_temporal_layers = 1; - parameters.encodings[1].num_temporal_layers = 2; - parameters.encodings[2].num_temporal_layers = 2; - EXPECT_EQ(webrtc::RTCErrorType::INVALID_MODIFICATION, - channel_->SetRtpSendParameters(last_ssrc_, parameters).type()); -} - TEST_F(WebRtcVideoChannelTest, GetAndSetRtpSendParametersNumTemporalLayers) { const size_t kNumSimulcastStreams = 3; SetUpSimulcast(true, false); @@ -7833,9 +7703,9 @@ TEST_F(WebRtcVideoChannelTest, NumTemporalLayersPropagatedToEncoder) { // Change the value and set it on the VideoChannel. webrtc::RtpParameters parameters = channel_->GetRtpSendParameters(last_ssrc_); EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size()); - parameters.encodings[0].num_temporal_layers = 2; + parameters.encodings[0].num_temporal_layers = 3; parameters.encodings[1].num_temporal_layers = 2; - parameters.encodings[2].num_temporal_layers = 2; + parameters.encodings[2].num_temporal_layers = 1; EXPECT_TRUE(channel_->SetRtpSendParameters(last_ssrc_, parameters).ok()); // Verify that the new value is propagated down to the encoder. @@ -7844,16 +7714,16 @@ TEST_F(WebRtcVideoChannelTest, NumTemporalLayersPropagatedToEncoder) { webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy(); EXPECT_EQ(kNumSimulcastStreams, encoder_config.number_of_streams); EXPECT_EQ(kNumSimulcastStreams, encoder_config.simulcast_layers.size()); - EXPECT_EQ(2UL, encoder_config.simulcast_layers[0].num_temporal_layers); + EXPECT_EQ(3UL, encoder_config.simulcast_layers[0].num_temporal_layers); EXPECT_EQ(2UL, encoder_config.simulcast_layers[1].num_temporal_layers); - EXPECT_EQ(2UL, encoder_config.simulcast_layers[2].num_temporal_layers); + EXPECT_EQ(1UL, encoder_config.simulcast_layers[2].num_temporal_layers); // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of // VideoStreams are created appropriately for the simulcast case. EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size()); - EXPECT_EQ(2UL, stream->GetVideoStreams()[0].num_temporal_layers); + EXPECT_EQ(3UL, stream->GetVideoStreams()[0].num_temporal_layers); EXPECT_EQ(2UL, stream->GetVideoStreams()[1].num_temporal_layers); - EXPECT_EQ(2UL, stream->GetVideoStreams()[2].num_temporal_layers); + EXPECT_EQ(1UL, stream->GetVideoStreams()[2].num_temporal_layers); // No parameter changed, encoder should not be reconfigured. EXPECT_TRUE(channel_->SetRtpSendParameters(last_ssrc_, parameters).ok()); @@ -7875,29 +7745,28 @@ TEST_F(WebRtcVideoChannelTest, channel_->SetSend(true); frame_forwarder.IncomingCapturedFrame(frame_source_.GetFrame()); - // Change rtp encoding parameters, num_temporal_layers not changed. + // Change rtp encoding parameters. webrtc::RtpParameters parameters = channel_->GetRtpSendParameters(last_ssrc_); EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size()); - parameters.encodings[0].min_bitrate_bps = 33000; + parameters.encodings[0].num_temporal_layers = 2; + parameters.encodings[2].num_temporal_layers = 1; EXPECT_TRUE(channel_->SetRtpSendParameters(last_ssrc_, parameters).ok()); // Verify that no value is propagated down to the encoder. 
webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy(); EXPECT_EQ(kNumSimulcastStreams, encoder_config.number_of_streams); EXPECT_EQ(kNumSimulcastStreams, encoder_config.simulcast_layers.size()); - EXPECT_FALSE(encoder_config.simulcast_layers[0].num_temporal_layers); + EXPECT_EQ(2UL, encoder_config.simulcast_layers[0].num_temporal_layers); EXPECT_FALSE(encoder_config.simulcast_layers[1].num_temporal_layers); - EXPECT_FALSE(encoder_config.simulcast_layers[2].num_temporal_layers); + EXPECT_EQ(1UL, encoder_config.simulcast_layers[2].num_temporal_layers); // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of // VideoStreams are created appropriately for the simulcast case. EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size()); - EXPECT_EQ(kDefaultNumTemporalLayers, - stream->GetVideoStreams()[0].num_temporal_layers); + EXPECT_EQ(2UL, stream->GetVideoStreams()[0].num_temporal_layers); EXPECT_EQ(kDefaultNumTemporalLayers, stream->GetVideoStreams()[1].num_temporal_layers); - EXPECT_EQ(kDefaultNumTemporalLayers, - stream->GetVideoStreams()[2].num_temporal_layers); + EXPECT_EQ(1UL, stream->GetVideoStreams()[2].num_temporal_layers); EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, nullptr, nullptr)); } @@ -7934,7 +7803,7 @@ TEST_F(WebRtcVideoChannelTest, // FakeVideoSendStream calls CreateEncoderStreams, test that the vector of // VideoStreams are created appropriately for the simulcast case. - // The maximum |max_framerate| is used, kDefaultVideoMaxFramerate: 60. + // The maximum `max_framerate` is used, kDefaultVideoMaxFramerate: 60. EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size()); EXPECT_EQ(15, stream->GetVideoStreams()[0].max_framerate); EXPECT_EQ(kDefaultVideoMaxFramerate, @@ -8701,17 +8570,35 @@ TEST_F(WebRtcVideoChannelTest, EXPECT_FALSE(rtp_parameters.encodings[0].ssrc); // Receive VP8 packet. - webrtc::RtpPacket rtp_packet; + RtpPacket rtp_packet; rtp_packet.SetPayloadType(GetEngineCodec("VP8").id); rtp_packet.SetSsrc(kIncomingUnsignalledSsrc); ReceivePacketAndAdvanceTime(rtp_packet.Buffer(), /* packet_time_us */ -1); - // The |ssrc| member should still be unset. + // The `ssrc` member should still be unset. rtp_parameters = channel_->GetDefaultRtpReceiveParameters(); ASSERT_EQ(1u, rtp_parameters.encodings.size()); EXPECT_FALSE(rtp_parameters.encodings[0].ssrc); } +// Test that if a default stream is created for a non-primary stream (for +// example, RTX before we know it's RTX), we are still able to explicitly add +// the stream later. +TEST_F(WebRtcVideoChannelTest, + AddReceiveStreamAfterReceivingNonPrimaryUnsignaledSsrc) { + // Receive VP8 RTX packet. 
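  // (Editor's note: RTX (RFC 4588) retransmits media on a separate payload
  // type whose "apt" fmtp parameter names the payload type it protects; the
  // default_apt_rtx_types_ map used below resolves VP8's payload type to its
  // associated RTX payload type, so the crafted packet arrives as RTX for a
  // primary stream that has not been signaled yet.)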
+  RtpPacket rtp_packet;
+  const cricket::VideoCodec vp8 = GetEngineCodec("VP8");
+  rtp_packet.SetPayloadType(default_apt_rtx_types_[vp8.id]);
+  rtp_packet.SetSsrc(2);
+  ReceivePacketAndAdvanceTime(rtp_packet.Buffer(), /* packet_time_us */ -1);
+  EXPECT_EQ(1u, fake_call_->GetVideoReceiveStreams().size());
+
+  cricket::StreamParams params = cricket::StreamParams::CreateLegacy(1);
+  params.AddFidSsrc(1, 2);
+  EXPECT_TRUE(channel_->AddRecvStream(params));
+}
+
 void WebRtcVideoChannelTest::TestReceiverLocalSsrcConfiguration(
     bool receiver_first) {
   EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
@@ -8899,7 +8786,6 @@ class WebRtcVideoChannelSimulcastTest : public ::testing::Test {
     ASSERT_EQ(expected_streams.size(), video_streams.size());

     size_t num_streams = video_streams.size();
-    int total_max_bitrate_bps = 0;
     for (size_t i = 0; i < num_streams; ++i) {
       EXPECT_EQ(expected_streams[i].width, video_streams[i].width);
       EXPECT_EQ(expected_streams[i].height, video_streams[i].height);
@@ -8930,12 +8816,6 @@ class WebRtcVideoChannelSimulcastTest : public ::testing::Test {
         EXPECT_EQ(expected_streams[i].num_temporal_layers,
                   video_streams[i].num_temporal_layers);
       }
-
-      if (i == num_streams - 1) {
-        total_max_bitrate_bps += video_streams[i].max_bitrate_bps;
-      } else {
-        total_max_bitrate_bps += video_streams[i].target_bitrate_bps;
-      }
     }

     EXPECT_TRUE(channel_->SetVideoSend(ssrcs.front(), nullptr, nullptr));
diff --git a/media/engine/webrtc_voice_engine.cc b/media/engine/webrtc_voice_engine.cc
index aa80c8724a..829cb82af1 100644
--- a/media/engine/webrtc_voice_engine.cc
+++ b/media/engine/webrtc_voice_engine.cc
@@ -35,6 +35,7 @@
 #include "modules/audio_mixer/audio_mixer_impl.h"
 #include "modules/audio_processing/aec_dump/aec_dump_factory.h"
 #include "modules/audio_processing/include/audio_processing.h"
+#include "modules/rtp_rtcp/source/rtp_util.h"
 #include "rtc_base/arraysize.h"
 #include "rtc_base/byte_order.h"
 #include "rtc_base/experiments/field_trial_parser.h"
@@ -66,6 +67,8 @@ RTC_POP_IGNORING_WUNDEF()
 namespace cricket {
 namespace {

+using ::webrtc::ParseRtpSsrc;
+
 constexpr size_t kMaxUnsignaledRecvStreams = 4;

 constexpr int kNackRtpHistoryMs = 5000;
@@ -149,8 +152,8 @@ absl::optional<std::string> GetAudioNetworkAdaptorConfig(
     const AudioOptions& options) {
   if (options.audio_network_adaptor && *options.audio_network_adaptor &&
       options.audio_network_adaptor_config) {
-    // Turn on audio network adaptor only when |options_.audio_network_adaptor|
-    // equals true and |options_.audio_network_adaptor_config| has a value.
+    // Turn on audio network adaptor only when `options_.audio_network_adaptor`
+    // equals true and `options_.audio_network_adaptor_config` has a value.
     return options.audio_network_adaptor_config;
   }
   return absl::nullopt;
@@ -168,8 +171,8 @@ int MinPositive(int a, int b) {
   return std::min(a, b);
 }

-// |max_send_bitrate_bps| is the bitrate from "b=" in SDP.
-// |rtp_max_bitrate_bps| is the bitrate from RtpSender::SetParameters.
+// `max_send_bitrate_bps` is the bitrate from "b=" in SDP.
+// `rtp_max_bitrate_bps` is the bitrate from RtpSender::SetParameters.
 absl::optional<int> ComputeSendBitrate(int max_send_bitrate_bps,
                                        absl::optional<int> rtp_max_bitrate_bps,
                                        const webrtc::AudioCodecSpec& spec) {
@@ -183,8 +186,8 @@ absl::optional<int> ComputeSendBitrate(int max_send_bitrate_bps,
   }

   if (bps < spec.info.min_bitrate_bps) {
-    // If codec is not multi-rate and |bps| is less than the fixed bitrate then
-    // fail. If codec is not multi-rate and |bps| exceeds or equal the fixed
+    // If codec is not multi-rate and `bps` is less than the fixed bitrate then
+    // fail. If codec is not multi-rate and `bps` exceeds or equals the fixed
     // bitrate then ignore.
     RTC_LOG(LS_ERROR) << "Failed to set codec " << spec.format.name
                       << " to bitrate " << bps
@@ -207,6 +210,11 @@ bool IsEnabled(const webrtc::WebRtcKeyValueConfig& config,
   return absl::StartsWith(config.Lookup(trial), "Enabled");
 }

+bool IsDisabled(const webrtc::WebRtcKeyValueConfig& config,
+                absl::string_view trial) {
+  return absl::StartsWith(config.Lookup(trial), "Disabled");
+}
+
 struct AdaptivePtimeConfig {
   bool enabled = false;
   webrtc::DataRate min_payload_bitrate = webrtc::DataRate::KilobitsPerSec(16);
@@ -247,6 +255,7 @@ webrtc::AudioReceiveStream::Config BuildReceiveStreamConfig(
     uint32_t local_ssrc,
     bool use_transport_cc,
     bool use_nack,
+    bool enable_non_sender_rtt,
     const std::vector<std::string>& stream_ids,
     const std::vector<webrtc::RtpExtension>& extensions,
     webrtc::Transport* rtcp_send_transport,
@@ -270,6 +279,7 @@ webrtc::AudioReceiveStream::Config BuildReceiveStreamConfig(
   }
   config.rtp.extensions = extensions;
   config.rtcp_send_transport = rtcp_send_transport;
+  config.enable_non_sender_rtt = enable_non_sender_rtt;
   config.decoder_factory = decoder_factory;
   config.decoder_map = decoder_map;
   config.codec_pair_id = codec_pair_id;
@@ -301,8 +311,8 @@ WebRtcVoiceEngine::WebRtcVoiceEngine(
       audio_mixer_(audio_mixer),
       apm_(audio_processing),
       audio_frame_processor_(audio_frame_processor),
-      audio_red_for_opus_trial_enabled_(
-          IsEnabled(trials, "WebRTC-Audio-Red-For-Opus")),
+      audio_red_for_opus_enabled_(
+          !IsDisabled(trials, "WebRTC-Audio-Red-For-Opus")),
       minimized_remsampling_on_mobile_trial_enabled_(
           IsEnabled(trials, "WebRTC-Audio-MinimizeResamplingOnMobile")) {
   // This may be called from any thread, so detach thread checkers.
@@ -333,6 +343,7 @@ void WebRtcVoiceEngine::Init() {
   RTC_LOG(LS_INFO) << "WebRtcVoiceEngine::Init";

   // TaskQueue expects to be created/destroyed on the same thread.
+  RTC_DCHECK(!low_priority_worker_queue_);
   low_priority_worker_queue_.reset(
       new rtc::TaskQueue(task_queue_factory_->CreateTaskQueue(
           "rtc-low-prio", webrtc::TaskQueueFactory::Priority::LOW)));
@@ -393,15 +404,12 @@ void WebRtcVoiceEngine::Init() {
     options.noise_suppression = true;
     options.typing_detection = true;
 #endif
-    options.experimental_ns = false;
     options.highpass_filter = true;
     options.stereo_swapping = false;
     options.audio_jitter_buffer_max_packets = 200;
     options.audio_jitter_buffer_fast_accelerate = false;
     options.audio_jitter_buffer_min_delay_ms = 0;
     options.audio_jitter_buffer_enable_rtx_handling = false;
-    options.experimental_agc = false;
-    options.residual_echo_detector = true;
     bool error = ApplyOptions(options);
     RTC_DCHECK(error);
   }
@@ -454,17 +462,14 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
   // Override noise suppression options for Android.
 #if defined(WEBRTC_ANDROID)
   options.typing_detection = false;
-  options.experimental_ns = false;
 #endif

   // Set and adjust gain control options.
 #if defined(WEBRTC_IOS)
   // On iOS, VPIO provides built-in AGC.
   options.auto_gain_control = false;
-  options.experimental_agc = false;
   RTC_LOG(LS_INFO) << "Always disable AGC on iOS. Use built-in instead.";
 #elif defined(WEBRTC_ANDROID)
-  options.experimental_agc = false;
 #endif

 #if defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID)
@@ -572,17 +577,6 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
     return true;
   }

-  webrtc::Config config;
-
-  if (options.experimental_ns) {
-    experimental_ns_ = options.experimental_ns;
-  }
-  if (experimental_ns_) {
-    RTC_LOG(LS_INFO) << "Experimental ns is enabled? " << *experimental_ns_;
-    config.Set<webrtc::ExperimentalNs>(
-        new webrtc::ExperimentalNs(*experimental_ns_));
-  }
-
   webrtc::AudioProcessing::Config apm_config = ap->GetConfig();

   if (options.echo_cancellation) {
@@ -600,30 +594,12 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
     apm_config.gain_controller1.mode =
         apm_config.gain_controller1.kAdaptiveAnalog;
 #endif
-    constexpr int kMinVolumeLevel = 0;
-    constexpr int kMaxVolumeLevel = 255;
-    apm_config.gain_controller1.analog_level_minimum = kMinVolumeLevel;
-    apm_config.gain_controller1.analog_level_maximum = kMaxVolumeLevel;
-  }
-  if (options.tx_agc_target_dbov) {
-    apm_config.gain_controller1.target_level_dbfs = *options.tx_agc_target_dbov;
-  }
-  if (options.tx_agc_digital_compression_gain) {
-    apm_config.gain_controller1.compression_gain_db =
-        *options.tx_agc_digital_compression_gain;
-  }
-  if (options.tx_agc_limiter) {
-    apm_config.gain_controller1.enable_limiter = *options.tx_agc_limiter;
   }

   if (options.highpass_filter) {
     apm_config.high_pass_filter.enabled = *options.highpass_filter;
   }

-  if (options.residual_echo_detector) {
-    apm_config.residual_echo_detector.enabled = *options.residual_echo_detector;
-  }
-
   if (options.noise_suppression) {
     const bool enabled = *options.noise_suppression;
     apm_config.noise_suppression.enabled = enabled;
@@ -633,9 +609,7 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
   }

   if (options.typing_detection) {
-    RTC_LOG(LS_INFO) << "Typing detection is enabled? "
" - << *options.typing_detection; - apm_config.voice_detection.enabled = *options.typing_detection; + RTC_LOG(LS_WARNING) << "Typing detection is requested, but unsupported."; } ap->ApplyConfig(apm_config); @@ -657,12 +631,10 @@ WebRtcVoiceEngine::GetRtpHeaderExtensions() const { RTC_DCHECK(signal_thread_checker_.IsCurrent()); std::vector result; int id = 1; - for (const auto& uri : - {webrtc::RtpExtension::kAudioLevelUri, - webrtc::RtpExtension::kAbsSendTimeUri, - webrtc::RtpExtension::kTransportSequenceNumberUri, - webrtc::RtpExtension::kMidUri, webrtc::RtpExtension::kRidUri, - webrtc::RtpExtension::kRepairedRidUri}) { + for (const auto& uri : {webrtc::RtpExtension::kAudioLevelUri, + webrtc::RtpExtension::kAbsSendTimeUri, + webrtc::RtpExtension::kTransportSequenceNumberUri, + webrtc::RtpExtension::kMidUri}) { result.emplace_back(uri, id++, webrtc::RtpTransceiverDirection::kSendRecv); } return result; @@ -766,8 +738,10 @@ std::vector WebRtcVoiceEngine::CollectCodecs( out.push_back(codec); - if (codec.name == kOpusCodecName && audio_red_for_opus_trial_enabled_) { - map_format({kRedCodecName, 48000, 2}, &out); + if (codec.name == kOpusCodecName && audio_red_for_opus_enabled_) { + std::string redFmtp = + rtc::ToString(codec.id) + "/" + rtc::ToString(codec.id); + map_format({kRedCodecName, 48000, 2, {{"", redFmtp}}}, &out); } } } @@ -1000,7 +974,7 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream number_of_frames, sample_rate, audio_frame->speech_type_, audio_frame->vad_activity_, number_of_channels); // TODO(bugs.webrtc.org/10739): add dcheck that - // |absolute_capture_timestamp_ms| always receives a value. + // `absolute_capture_timestamp_ms` always receives a value. if (absolute_capture_timestamp_ms) { audio_frame->set_absolute_capture_timestamp_ms( *absolute_capture_timestamp_ms); @@ -1008,11 +982,11 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream stream_->SendAudioData(std::move(audio_frame)); } - // Callback from the |source_| when it is going away. In case Start() has + // Callback from the `source_` when it is going away. In case Start() has // never been called, this callback won't be triggered. void OnClose() override { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - // Set |source_| to nullptr to make sure no more callback will get into + // Set `source_` to nullptr to make sure no more callback will get into // the source. source_ = nullptr; UpdateSendState(); @@ -1237,6 +1211,11 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream { use_nack ? 
kNackRtpHistoryMs : 0); } + void SetNonSenderRttMeasurement(bool enabled) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + stream_->SetNonSenderRttMeasurement(enabled); + } + void SetRtpExtensions(const std::vector& extensions) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); stream_->SetRtpExtensions(extensions); @@ -1327,14 +1306,14 @@ WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel( const AudioOptions& options, const webrtc::CryptoOptions& crypto_options, webrtc::Call* call) - : VoiceMediaChannel(config, call->network_thread()), + : VoiceMediaChannel(call->network_thread(), config.enable_dscp), worker_thread_(call->worker_thread()), engine_(engine), call_(call), audio_config_(config.audio), crypto_options_(crypto_options), - audio_red_for_opus_trial_enabled_( - IsEnabled(call->trials(), "WebRTC-Audio-Red-For-Opus")) { + audio_red_for_opus_enabled_( + !IsDisabled(call->trials(), "WebRTC-Audio-Red-For-Opus")) { network_thread_checker_.Detach(); RTC_LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel"; RTC_DCHECK(call); @@ -1368,7 +1347,7 @@ bool WebRtcVoiceMediaChannel::SetSendParameters( return false; } - if (!ValidateRtpExtensions(params.extensions)) { + if (!ValidateRtpExtensions(params.extensions, send_rtp_extensions_)) { return false; } @@ -1414,7 +1393,7 @@ bool WebRtcVoiceMediaChannel::SetRecvParameters( return false; } - if (!ValidateRtpExtensions(params.extensions)) { + if (!ValidateRtpExtensions(params.extensions, recv_rtp_extensions_)) { return false; } std::vector filtered_extensions = FilterRtpExtensions( @@ -1492,11 +1471,11 @@ webrtc::RTCError WebRtcVoiceMediaChannel::SetRtpSendParameters( } // TODO(minyue): The following legacy actions go into - // |WebRtcAudioSendStream::SetRtpParameters()| which is called at the end, + // `WebRtcAudioSendStream::SetRtpParameters()` which is called at the end, // though there are two difference: - // 1. |WebRtcVoiceMediaChannel::SetChannelSendParameters()| only calls - // |SetSendCodec| while |WebRtcAudioSendStream::SetRtpParameters()| calls - // |SetSendCodecs|. The outcome should be the same. + // 1. `WebRtcVoiceMediaChannel::SetChannelSendParameters()` only calls + // `SetSendCodec` while `WebRtcAudioSendStream::SetRtpParameters()` calls + // `SetSendCodecs`. The outcome should be the same. // 2. AudioSendStream can be recreated. // Codecs are handled at the WebRtcVoiceMediaChannel level. @@ -1595,8 +1574,7 @@ bool WebRtcVoiceMediaChannel::SetRecvCodecs( } auto format = AudioCodecToSdpAudioFormat(codec); if (!IsCodec(codec, kCnCodecName) && !IsCodec(codec, kDtmfCodecName) && - (!audio_red_for_opus_trial_enabled_ || - !IsCodec(codec, kRedCodecName)) && + (!audio_red_for_opus_enabled_ || !IsCodec(codec, kRedCodecName)) && !engine()->decoder_factory_->IsSupportedDecoder(format)) { RTC_LOG(LS_ERROR) << "Unsupported codec: " << rtc::ToString(format); return false; @@ -1647,6 +1625,37 @@ bool WebRtcVoiceMediaChannel::SetRecvCodecs( return true; } +// Utility function to check if RED codec and its parameters match a codec spec. +bool CheckRedParameters( + const AudioCodec& red_codec, + const webrtc::AudioSendStream::Config::SendCodecSpec& send_codec_spec) { + if (red_codec.clockrate != send_codec_spec.format.clockrate_hz || + red_codec.channels != send_codec_spec.format.num_channels) { + return false; + } + + // Check the FMTP line for the empty parameter which should match + // /[/...] 
+ auto red_parameters = red_codec.params.find(""); + if (red_parameters == red_codec.params.end()) { + RTC_LOG(LS_WARNING) << "audio/RED missing fmtp parameters."; + return false; + } + std::vector redundant_payloads = + rtc::split(red_parameters->second, '/'); + // 32 is chosen as a maximum upper bound for consistency with the + // red payload splitter. + if (redundant_payloads.size() < 2 || redundant_payloads.size() > 32) { + return false; + } + for (auto pt : redundant_payloads) { + if (pt != rtc::ToString(send_codec_spec.payload_type)) { + return false; + } + } + return true; +} + // Utility function called from SetSendParameters() to extract current send // codec settings from the given list of codecs (originally from SDP). Both send // and receive streams may be reconfigured based on the new settings. @@ -1708,6 +1717,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs( } send_codec_spec->transport_cc_enabled = HasTransportCc(voice_codec); send_codec_spec->nack_enabled = HasNack(voice_codec); + send_codec_spec->enable_non_sender_rtt = HasRrtr(voice_codec); bitrate_config = GetBitrateConfigForCodec(voice_codec); break; } @@ -1750,15 +1760,14 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs( } } - if (audio_red_for_opus_trial_enabled_) { + if (audio_red_for_opus_enabled_) { // Loop through the codecs to find the RED codec that matches opus // with respect to clockrate and number of channels. size_t red_codec_position = 0; for (const AudioCodec& red_codec : codecs) { if (red_codec_position < send_codec_position && IsCodec(red_codec, kRedCodecName) && - red_codec.clockrate == send_codec_spec->format.clockrate_hz && - red_codec.channels == send_codec_spec->format.num_channels) { + CheckRedParameters(red_codec, *send_codec_spec)) { send_codec_spec->red_payload_type = red_codec.id; break; } @@ -1783,16 +1792,27 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs( // preferred send codec, and in that case reconfigure all receive streams. if (recv_transport_cc_enabled_ != send_codec_spec_->transport_cc_enabled || recv_nack_enabled_ != send_codec_spec_->nack_enabled) { - RTC_LOG(LS_INFO) << "Recreate all the receive streams because the send " - "codec has changed."; + RTC_LOG(LS_INFO) << "Changing transport cc and NACK status on receive " + "streams."; recv_transport_cc_enabled_ = send_codec_spec_->transport_cc_enabled; recv_nack_enabled_ = send_codec_spec_->nack_enabled; + enable_non_sender_rtt_ = send_codec_spec_->enable_non_sender_rtt; for (auto& kv : recv_streams_) { kv.second->SetUseTransportCc(recv_transport_cc_enabled_, recv_nack_enabled_); } } + // Check if the receive-side RTT status has changed on the preferred send + // codec, in that case reconfigure all receive streams. + if (enable_non_sender_rtt_ != send_codec_spec_->enable_non_sender_rtt) { + RTC_LOG(LS_INFO) << "Changing receive-side RTT status on receive streams."; + enable_non_sender_rtt_ = send_codec_spec_->enable_non_sender_rtt; + for (auto& kv : recv_streams_) { + kv.second->SetNonSenderRttMeasurement(enable_non_sender_rtt_); + } + } + send_codecs_ = codecs; return true; } @@ -1816,13 +1836,15 @@ void WebRtcVoiceMediaChannel::SetSend(bool send) { return; } - // Apply channel specific options, and initialize the ADM for recording (this - // may take time on some platforms, e.g. Android). + // Apply channel specific options. if (send) { engine()->ApplyOptions(options_); - // InitRecording() may return an error if the ADM is already recording. 
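For context on CheckRedParameters() above: the audio/RED fmtp value is a slash-separated list of payload types, <pt>/<pt>[/<pt>...], and the check passes only when it names 2 to 32 redundancy levels that all equal the negotiated Opus payload type. A minimal standalone sketch of the same rule, with plain std::string parsing standing in for rtc::split (hypothetical helper, not part of the patch):

#include <sstream>
#include <string>
#include <vector>

// Validates an audio/RED fmtp value such as "111/111": between 2 and 32
// entries (the upper bound matches the red payload splitter), each equal
// to the primary payload type.
bool RedFmtpMatchesPayloadType(const std::string& fmtp, int payload_type) {
  std::vector<std::string> entries;
  std::stringstream stream(fmtp);
  std::string entry;
  while (std::getline(stream, entry, '/')) {
    entries.push_back(entry);
  }
  if (entries.size() < 2 || entries.size() > 32) {
    return false;
  }
  for (const std::string& e : entries) {
    if (e != std::to_string(payload_type)) {
      return false;
    }
  }
  return true;
}

Under this rule "111/111" matches payload type 111, while "8/8" (wrong type) and a bare "111" (a single level) are rejected, mirroring the unit tests later in this change.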
- if (!engine()->adm()->RecordingIsInitialized() && + // Initialize the ADM for recording (this may take time on some platforms, + // e.g. Android). + if (options_.init_recording_on_send.value_or(true) && + // InitRecording() may return an error if the ADM is already recording. + !engine()->adm()->RecordingIsInitialized() && !engine()->adm()->Recording()) { if (engine()->adm()->InitRecording() != 0) { RTC_LOG(LS_WARNING) << "Failed to initialize recording"; @@ -1956,9 +1978,9 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) { // Create a new channel for receiving audio data. auto config = BuildReceiveStreamConfig( ssrc, receiver_reports_ssrc_, recv_transport_cc_enabled_, - recv_nack_enabled_, sp.stream_ids(), recv_rtp_extensions_, this, - engine()->decoder_factory_, decoder_map_, codec_pair_id_, - engine()->audio_jitter_buffer_max_packets_, + recv_nack_enabled_, enable_non_sender_rtt_, sp.stream_ids(), + recv_rtp_extensions_, this, engine()->decoder_factory_, decoder_map_, + codec_pair_id_, engine()->audio_jitter_buffer_max_packets_, engine()->audio_jitter_buffer_fast_accelerate_, engine()->audio_jitter_buffer_min_delay_ms_, engine()->audio_jitter_buffer_enable_rtx_handling_, @@ -1995,7 +2017,7 @@ void WebRtcVoiceMediaChannel::ResetUnsignaledRecvStream() { RTC_DCHECK_RUN_ON(worker_thread_); RTC_LOG(LS_INFO) << "ResetUnsignaledRecvStream."; unsignaled_stream_params_ = StreamParams(); - // Create a copy since RemoveRecvStream will modify |unsignaled_recv_ssrcs_|. + // Create a copy since RemoveRecvStream will modify `unsignaled_recv_ssrcs_`. std::vector to_remove = unsignaled_recv_ssrcs_; for (uint32_t ssrc : to_remove) { RemoveRecvStream(ssrc); @@ -2179,10 +2201,7 @@ void WebRtcVoiceMediaChannel::OnPacketReceived(rtc::CopyOnWriteBuffer packet, // Create an unsignaled receive stream for this previously not received // ssrc. If there already is N unsignaled receive streams, delete the // oldest. See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5208 - uint32_t ssrc = 0; - if (!GetRtpSsrc(packet.cdata(), packet.size(), &ssrc)) { - return; - } + uint32_t ssrc = ParseRtpSsrc(packet); RTC_DCHECK(!absl::c_linear_search(unsignaled_recv_ssrcs_, ssrc)); // Add new stream. 
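The replaced GetRtpSsrc(ptr, size, &ssrc) out-parameter call becomes a direct ParseRtpSsrc(packet) above. For reference, the SSRC occupies bytes 8..11 of the RTP fixed header in network byte order; a self-contained sketch of that parse (hypothetical helper; the real one takes an rtc::ArrayView<const uint8_t>):

#include <cstddef>
#include <cstdint>

// Reads the SSRC from the RTP fixed header (bytes 8..11, big-endian).
// Returns 0 if the buffer is too short to hold the 12-byte fixed header.
uint32_t ParseRtpSsrcFromBytes(const uint8_t* packet, size_t size) {
  if (size < 12) {
    return 0;
  }
  return (static_cast<uint32_t>(packet[8]) << 24) |
         (static_cast<uint32_t>(packet[9]) << 16) |
         (static_cast<uint32_t>(packet[10]) << 8) |
         static_cast<uint32_t>(packet[11]);
}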
@@ -2241,14 +2260,15 @@ void WebRtcVoiceMediaChannel::OnPacketSent(const rtc::SentPacket& sent_packet) { } void WebRtcVoiceMediaChannel::OnNetworkRouteChanged( - const std::string& transport_name, + absl::string_view transport_name, const rtc::NetworkRoute& network_route) { RTC_DCHECK_RUN_ON(&network_thread_checker_); call_->OnAudioTransportOverheadChanged(network_route.packet_overhead); worker_thread_->PostTask(ToQueuedTask( - task_safety_, [this, name = transport_name, route = network_route] { + task_safety_, + [this, name = std::string(transport_name), route = network_route] { RTC_DCHECK_RUN_ON(worker_thread_); call_->GetTransportControllerSend()->OnNetworkRouteChanged(name, route); })); @@ -2321,6 +2341,7 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info, sinfo.packets_lost = stats.packets_lost; sinfo.fraction_lost = stats.fraction_lost; sinfo.nacks_rcvd = stats.nacks_rcvd; + sinfo.target_bitrate = stats.target_bitrate_bps; sinfo.codec_name = stats.codec_name; sinfo.codec_payload_type = stats.codec_payload_type; sinfo.jitter_ms = stats.jitter_ms; @@ -2365,6 +2386,7 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info, rinfo.fec_packets_received = stats.fec_packets_received; rinfo.fec_packets_discarded = stats.fec_packets_discarded; rinfo.packets_lost = stats.packets_lost; + rinfo.packets_discarded = stats.packets_discarded; rinfo.codec_name = stats.codec_name; rinfo.codec_payload_type = stats.codec_payload_type; rinfo.jitter_ms = stats.jitter_ms; @@ -2419,6 +2441,9 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info, rinfo.sender_reports_packets_sent = stats.sender_reports_packets_sent; rinfo.sender_reports_bytes_sent = stats.sender_reports_bytes_sent; rinfo.sender_reports_reports_count = stats.sender_reports_reports_count; + rinfo.round_trip_time = stats.round_trip_time; + rinfo.round_trip_time_measurements = stats.round_trip_time_measurements; + rinfo.total_round_trip_time = stats.total_round_trip_time; if (recv_nack_enabled_) { rinfo.nacks_sent = stats.nacks_sent; diff --git a/media/engine/webrtc_voice_engine.h b/media/engine/webrtc_voice_engine.h index 147688b0e0..1061d7a129 100644 --- a/media/engine/webrtc_voice_engine.h +++ b/media/engine/webrtc_voice_engine.h @@ -124,18 +124,15 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface { bool is_dumping_aec_ = false; bool initialized_ = false; - // Cache experimental_ns and apply in case they are missing in the audio - // options. - absl::optional experimental_ns_; // Jitter buffer settings for new streams. size_t audio_jitter_buffer_max_packets_ = 200; bool audio_jitter_buffer_fast_accelerate_ = false; int audio_jitter_buffer_min_delay_ms_ = 0; bool audio_jitter_buffer_enable_rtx_handling_ = false; - // If this field trial is enabled, we will negotiate and use RFC 2198 + // If this field is enabled, we will negotiate and use RFC 2198 // redundancy for opus audio. 
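One detail of the OnNetworkRouteChanged() change above deserves a note: with `transport_name` now an absl::string_view, the posted lambda captures `name = std::string(transport_name)` because a view only borrows caller-owned bytes, which may be gone by the time the worker thread runs the task. A reduced illustration with standard types (the vector of std::function stands in for the WebRTC task queue; names are hypothetical):

#include <functional>
#include <string>
#include <string_view>
#include <vector>

std::vector<std::function<void()>> g_pending_tasks;

// Copies the viewed characters before deferring work; capturing `name`
// itself could leave the task with a dangling view.
void PostNameToWorker(std::string_view name) {
  g_pending_tasks.push_back([owned = std::string(name)] {
    // `owned` stays valid for as long as the lambda lives.
  });
}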
- const bool audio_red_for_opus_trial_enabled_; + const bool audio_red_for_opus_enabled_; const bool minimized_remsampling_on_mobile_trial_enabled_; }; @@ -209,7 +206,7 @@ class WebRtcVoiceMediaChannel final : public VoiceMediaChannel, void OnPacketReceived(rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) override; void OnPacketSent(const rtc::SentPacket& sent_packet) override; - void OnNetworkRouteChanged(const std::string& transport_name, + void OnNetworkRouteChanged(absl::string_view transport_name, const rtc::NetworkRoute& network_route) override; void OnReadyToSend(bool ready) override; bool GetStats(VoiceMediaInfo* info, bool get_and_clear_legacy_stats) override; @@ -277,6 +274,7 @@ class WebRtcVoiceMediaChannel final : public VoiceMediaChannel, int dtmf_payload_freq_ = -1; bool recv_transport_cc_enabled_ = false; bool recv_nack_enabled_ = false; + bool enable_non_sender_rtt_ = false; bool playout_ = false; bool send_ = false; webrtc::Call* const call_ = nullptr; @@ -328,7 +326,7 @@ class WebRtcVoiceMediaChannel final : public VoiceMediaChannel, rtc::scoped_refptr unsignaled_frame_decryptor_; - const bool audio_red_for_opus_trial_enabled_; + const bool audio_red_for_opus_enabled_; }; } // namespace cricket diff --git a/media/engine/webrtc_voice_engine_unittest.cc b/media/engine/webrtc_voice_engine_unittest.cc index c570b1a03a..ad15a638bb 100644 --- a/media/engine/webrtc_voice_engine_unittest.cc +++ b/media/engine/webrtc_voice_engine_unittest.cc @@ -148,7 +148,7 @@ TEST(WebRtcVoiceEngineTestStubLibrary, StartupShutdown) { AdmSetupExpectations(adm); rtc::scoped_refptr> apm = use_null_apm ? nullptr - : new rtc::RefCountedObject< + : rtc::make_ref_counted< StrictMock>(); webrtc::AudioProcessing::Config apm_config; @@ -188,7 +188,7 @@ class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam { adm_(webrtc::test::MockAudioDeviceModule::CreateStrict()), apm_(use_null_apm_ ? nullptr - : new rtc::RefCountedObject< + : rtc::make_ref_counted< StrictMock>()), call_(), override_field_trials_(field_trials) { @@ -221,7 +221,6 @@ class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam { // Default Options. VerifyEchoCancellationSettings(/*enabled=*/true); EXPECT_TRUE(IsHighPassFilterEnabled()); - EXPECT_TRUE(IsTypingDetectionEnabled()); EXPECT_TRUE(apm_config_.noise_suppression.enabled); EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel); VerifyGainControlEnabledCorrectly(); @@ -305,9 +304,15 @@ class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam { void SetSend(bool enable) { ASSERT_TRUE(channel_); if (enable) { - EXPECT_CALL(*adm_, RecordingIsInitialized()).WillOnce(Return(false)); - EXPECT_CALL(*adm_, Recording()).WillOnce(Return(false)); - EXPECT_CALL(*adm_, InitRecording()).WillOnce(Return(0)); + EXPECT_CALL(*adm_, RecordingIsInitialized()) + .Times(::testing::AtMost(1)) + .WillOnce(Return(false)); + EXPECT_CALL(*adm_, Recording()) + .Times(::testing::AtMost(1)) + .WillOnce(Return(false)); + EXPECT_CALL(*adm_, InitRecording()) + .Times(::testing::AtMost(1)) + .WillOnce(Return(0)); } channel_->SetSend(enable); } @@ -395,10 +400,10 @@ class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam { } // Test that send bandwidth is set correctly. - // |codec| is the codec under test. - // |max_bitrate| is a parameter to set to SetMaxSendBandwidth(). - // |expected_result| is the expected result from SetMaxSendBandwidth(). - // |expected_bitrate| is the expected audio bitrate afterward. + // `codec` is the codec under test. 
+ // `max_bitrate` is a parameter to set to SetMaxSendBandwidth(). + // `expected_result` is the expected result from SetMaxSendBandwidth(). + // `expected_bitrate` is the expected audio bitrate afterward. void TestMaxSendBandwidth(const cricket::AudioCodec& codec, int max_bitrate, bool expected_result, @@ -760,8 +765,6 @@ class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam { void VerifyGainControlEnabledCorrectly() { EXPECT_TRUE(apm_config_.gain_controller1.enabled); EXPECT_EQ(kDefaultAgcMode, apm_config_.gain_controller1.mode); - EXPECT_EQ(0, apm_config_.gain_controller1.analog_level_minimum); - EXPECT_EQ(255, apm_config_.gain_controller1.analog_level_maximum); } void VerifyGainControlDefaultSettings() { @@ -785,10 +788,6 @@ class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam { return apm_config_.high_pass_filter.enabled; } - bool IsTypingDetectionEnabled() { - return apm_config_.voice_detection.enabled; - } - protected: const bool use_null_apm_; std::unique_ptr task_queue_factory_; @@ -1022,23 +1021,25 @@ TEST_P(WebRtcVoiceEngineTestFake, ChangeRecvCodecPayloadType) { EXPECT_TRUE(channel_->SetRecvParameters(parameters)); } -// Test that we set Opus/Red under the field trial. -TEST_P(WebRtcVoiceEngineTestFake, RecvRed) { - webrtc::test::ScopedFieldTrials override_field_trials( - "WebRTC-Audio-Red-For-Opus/Enabled/"); - +// Test that we do allow setting Opus/Red by default. +TEST_P(WebRtcVoiceEngineTestFake, RecvRedDefault) { EXPECT_TRUE(SetupRecvStream()); cricket::AudioRecvParameters parameters; parameters.codecs.push_back(kOpusCodec); parameters.codecs.push_back(kRed48000Codec); + parameters.codecs[1].params[""] = "111/111"; EXPECT_TRUE(channel_->SetRecvParameters(parameters)); EXPECT_THAT(GetRecvStreamConfig(kSsrcX).decoder_map, (ContainerEq>( - {{111, {"opus", 48000, 2}}, {112, {"red", 48000, 2}}}))); + {{111, {"opus", 48000, 2}}, + {112, {"red", 48000, 2, {{"", "111/111"}}}}}))); } -// Test that we do not allow setting Opus/Red by default. -TEST_P(WebRtcVoiceEngineTestFake, RecvRedDefault) { +// Test that we disable Opus/Red with the kill switch. +TEST_P(WebRtcVoiceEngineTestFake, RecvRed) { + webrtc::test::ScopedFieldTrials override_field_trials( + "WebRTC-Audio-Red-For-Opus/Disabled/"); + EXPECT_TRUE(SetupRecvStream()); cricket::AudioRecvParameters parameters; parameters.codecs.push_back(kOpusCodec); @@ -1470,7 +1471,7 @@ TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersWithUnsignaledSsrc) { // Receive PCMU packet (SSRC=1). DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame)); - // The |ssrc| member should still be unset. + // The `ssrc` member should still be unset. rtp_parameters = channel_->GetDefaultRtpReceiveParameters(); ASSERT_EQ(1u, rtp_parameters.encodings.size()); EXPECT_FALSE(rtp_parameters.encodings[0].ssrc); @@ -1495,15 +1496,13 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecs) { EXPECT_FALSE(channel_->CanInsertDtmf()); } -// Test that we use Opus/Red under the field trial when it is -// listed as the first codec. +// Test that we use Opus/Red by default when it is +// listed as the first codec and there is an fmtp line. 
TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRed) { - webrtc::test::ScopedFieldTrials override_field_trials( - "WebRTC-Audio-Red-For-Opus/Enabled/"); - EXPECT_TRUE(SetupSendStream()); cricket::AudioSendParameters parameters; parameters.codecs.push_back(kRed48000Codec); + parameters.codecs[0].params[""] = "111/111"; parameters.codecs.push_back(kOpusCodec); SetSendParameters(parameters); const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; @@ -1512,20 +1511,74 @@ TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRed) { EXPECT_EQ(112, send_codec_spec.red_payload_type); } -// Test that we do not use Opus/Red under the field trial by default. +// Test that we do not use Opus/Red by default when it is +// listed as the first codec but there is no fmtp line. +TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRedNoFmtp) { + EXPECT_TRUE(SetupSendStream()); + cricket::AudioSendParameters parameters; + parameters.codecs.push_back(kRed48000Codec); + parameters.codecs.push_back(kOpusCodec); + SetSendParameters(parameters); + const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; + EXPECT_EQ(111, send_codec_spec.payload_type); + EXPECT_STRCASEEQ("opus", send_codec_spec.format.name.c_str()); + EXPECT_EQ(absl::nullopt, send_codec_spec.red_payload_type); +} + +// Test that we do not use Opus/Red by default. TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRedDefault) { - webrtc::test::ScopedFieldTrials override_field_trials( - "WebRTC-Audio-Red-For-Opus/Enabled/"); + EXPECT_TRUE(SetupSendStream()); + cricket::AudioSendParameters parameters; + parameters.codecs.push_back(kOpusCodec); + parameters.codecs.push_back(kRed48000Codec); + parameters.codecs[1].params[""] = "111/111"; + SetSendParameters(parameters); + const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; + EXPECT_EQ(111, send_codec_spec.payload_type); + EXPECT_STRCASEEQ("opus", send_codec_spec.format.name.c_str()); + EXPECT_EQ(absl::nullopt, send_codec_spec.red_payload_type); +} +// Test that the RED fmtp line must match the payload type. +TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRedFmtpMismatch) { EXPECT_TRUE(SetupSendStream()); cricket::AudioSendParameters parameters; + parameters.codecs.push_back(kRed48000Codec); + parameters.codecs[0].params[""] = "8/8"; parameters.codecs.push_back(kOpusCodec); + SetSendParameters(parameters); + const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; + EXPECT_EQ(111, send_codec_spec.payload_type); + EXPECT_STRCASEEQ("opus", send_codec_spec.format.name.c_str()); + EXPECT_EQ(absl::nullopt, send_codec_spec.red_payload_type); +} + +// Test that the RED fmtp line must show 2..32 payloads. 
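The redundancy-count test that follows grows its fmtp one "/111" at a time. A hypothetical helper expressing the same construction, for clarity:

#include <string>

// Builds an audio/RED fmtp value with `levels` references to a single
// payload type, e.g. BuildRedFmtp(111, 3) == "111/111/111".
std::string BuildRedFmtp(int payload_type, int levels) {
  std::string fmtp = std::to_string(payload_type);
  for (int i = 1; i < levels; ++i) {
    fmtp += "/" + std::to_string(payload_type);
  }
  return fmtp;
}

Levels 2 through 32 should negotiate RED (red_payload_type == 112 below); one level, or more than 32, falls back to plain Opus.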
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsRedFmtpAmountOfRedundancy) { + EXPECT_TRUE(SetupSendStream()); + cricket::AudioSendParameters parameters; parameters.codecs.push_back(kRed48000Codec); + parameters.codecs[0].params[""] = "111"; + parameters.codecs.push_back(kOpusCodec); SetSendParameters(parameters); const auto& send_codec_spec = *GetSendStreamConfig(kSsrcX).send_codec_spec; EXPECT_EQ(111, send_codec_spec.payload_type); EXPECT_STRCASEEQ("opus", send_codec_spec.format.name.c_str()); EXPECT_EQ(absl::nullopt, send_codec_spec.red_payload_type); + for (int i = 1; i < 32; i++) { + parameters.codecs[0].params[""] += "/111"; + SetSendParameters(parameters); + const auto& send_codec_spec2 = *GetSendStreamConfig(kSsrcX).send_codec_spec; + EXPECT_EQ(111, send_codec_spec2.payload_type); + EXPECT_STRCASEEQ("opus", send_codec_spec2.format.name.c_str()); + EXPECT_EQ(112, send_codec_spec2.red_payload_type); + } + parameters.codecs[0].params[""] += "/111"; + SetSendParameters(parameters); + const auto& send_codec_spec3 = *GetSendStreamConfig(kSsrcX).send_codec_spec; + EXPECT_EQ(111, send_codec_spec3.payload_type); + EXPECT_STRCASEEQ("opus", send_codec_spec3.format.name.c_str()); + EXPECT_EQ(absl::nullopt, send_codec_spec3.red_payload_type); } // Test that WebRtcVoiceEngine reconfigures, rather than recreates its @@ -2421,58 +2474,6 @@ TEST_P(WebRtcVoiceEngineTestFake, PlayoutWithMultipleStreams) { EXPECT_TRUE(channel_->RemoveRecvStream(kSsrcY)); } -TEST_P(WebRtcVoiceEngineTestFake, TxAgcConfigViaOptions) { - EXPECT_TRUE(SetupSendStream()); - EXPECT_CALL(*adm_, BuiltInAGCIsAvailable()) - .Times(::testing::AtLeast(1)) - .WillRepeatedly(Return(false)); - - if (!use_null_apm_) { - // Ensure default options. - VerifyGainControlEnabledCorrectly(); - VerifyGainControlDefaultSettings(); - } - - const auto& agc_config = apm_config_.gain_controller1; - - send_parameters_.options.auto_gain_control = false; - SetSendParameters(send_parameters_); - if (!use_null_apm_) { - EXPECT_FALSE(agc_config.enabled); - } - send_parameters_.options.auto_gain_control = absl::nullopt; - - send_parameters_.options.tx_agc_target_dbov = 5; - SetSendParameters(send_parameters_); - if (!use_null_apm_) { - EXPECT_EQ(5, agc_config.target_level_dbfs); - } - send_parameters_.options.tx_agc_target_dbov = absl::nullopt; - - send_parameters_.options.tx_agc_digital_compression_gain = 10; - SetSendParameters(send_parameters_); - if (!use_null_apm_) { - EXPECT_EQ(10, agc_config.compression_gain_db); - } - send_parameters_.options.tx_agc_digital_compression_gain = absl::nullopt; - - send_parameters_.options.tx_agc_limiter = false; - SetSendParameters(send_parameters_); - if (!use_null_apm_) { - EXPECT_FALSE(agc_config.enable_limiter); - } - send_parameters_.options.tx_agc_limiter = absl::nullopt; - - SetSendParameters(send_parameters_); - if (!use_null_apm_) { - // Expect all options to have been preserved. 
- EXPECT_FALSE(agc_config.enabled); - EXPECT_EQ(5, agc_config.target_level_dbfs); - EXPECT_EQ(10, agc_config.compression_gain_db); - EXPECT_FALSE(agc_config.enable_limiter); - } -} - TEST_P(WebRtcVoiceEngineTestFake, SetAudioNetworkAdaptorViaOptions) { EXPECT_TRUE(SetupSendStream()); send_parameters_.options.audio_network_adaptor = true; @@ -2505,7 +2506,7 @@ TEST_P(WebRtcVoiceEngineTestFake, AudioNetworkAdaptorNotGetOverridden) { const int initial_num = call_.GetNumCreatedSendStreams(); cricket::AudioOptions options; options.audio_network_adaptor = absl::nullopt; - // Unvalued |options.audio_network_adaptor|.should not reset audio network + // Unvalued `options.audio_network_adaptor` should not reset audio network // adaptor. SetAudioSend(kSsrcX, true, nullptr, &options); // AudioSendStream not expected to be recreated. @@ -2979,32 +2980,10 @@ TEST_P(WebRtcVoiceEngineTestFake, SetAudioOptions) { if (!use_null_apm_) { VerifyEchoCancellationSettings(/*enabled=*/true); EXPECT_TRUE(IsHighPassFilterEnabled()); - EXPECT_TRUE(IsTypingDetectionEnabled()); } EXPECT_EQ(200u, GetRecvStreamConfig(kSsrcY).jitter_buffer_max_packets); EXPECT_FALSE(GetRecvStreamConfig(kSsrcY).jitter_buffer_fast_accelerate); - // Turn typing detection off. - send_parameters_.options.typing_detection = false; - SetSendParameters(send_parameters_); - if (!use_null_apm_) { - EXPECT_FALSE(IsTypingDetectionEnabled()); - } - - // Leave typing detection unchanged, but non-default. - send_parameters_.options.typing_detection = absl::nullopt; - SetSendParameters(send_parameters_); - if (!use_null_apm_) { - EXPECT_FALSE(IsTypingDetectionEnabled()); - } - - // Turn typing detection on. - send_parameters_.options.typing_detection = true; - SetSendParameters(send_parameters_); - if (!use_null_apm_) { - EXPECT_TRUE(IsTypingDetectionEnabled()); - } - // Turn echo cancellation off send_parameters_.options.echo_cancellation = false; SetSendParameters(send_parameters_); @@ -3070,6 +3049,34 @@ TEST_P(WebRtcVoiceEngineTestFake, SetAudioOptions) { } } +TEST_P(WebRtcVoiceEngineTestFake, InitRecordingOnSend) { + EXPECT_CALL(*adm_, RecordingIsInitialized()).WillOnce(Return(false)); + EXPECT_CALL(*adm_, Recording()).WillOnce(Return(false)); + EXPECT_CALL(*adm_, InitRecording()).Times(1); + + std::unique_ptr channel( + engine_->CreateMediaChannel(&call_, cricket::MediaConfig(), + cricket::AudioOptions(), + webrtc::CryptoOptions())); + + channel->SetSend(true); +} + +TEST_P(WebRtcVoiceEngineTestFake, SkipInitRecordingOnSend) { + EXPECT_CALL(*adm_, RecordingIsInitialized()).Times(0); + EXPECT_CALL(*adm_, Recording()).Times(0); + EXPECT_CALL(*adm_, InitRecording()).Times(0); + + cricket::AudioOptions options; + options.init_recording_on_send = false; + + std::unique_ptr channel( + engine_->CreateMediaChannel(&call_, cricket::MediaConfig(), options, + webrtc::CryptoOptions())); + + channel->SetSend(true); +} + TEST_P(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) { EXPECT_TRUE(SetupSendStream()); EXPECT_CALL(*adm_, BuiltInAECIsAvailable()) @@ -3611,11 +3618,11 @@ TEST_P(WebRtcVoiceEngineTestFake, PreservePlayoutWhenRecreateRecvStream) { // Tests when GetSources is called with non-existing ssrc, it will return an // empty list of RtpSource without crashing. TEST_P(WebRtcVoiceEngineTestFake, GetSourcesWithNonExistingSsrc) { - // Setup an recv stream with |kSsrcX|. + // Setup an recv stream with `kSsrcX`. 
SetupRecvStream(); cricket::WebRtcVoiceMediaChannel* media_channel = static_cast(channel_); - // Call GetSources with |kSsrcY| which doesn't exist. + // Call GetSources with `kSsrcY` which doesn't exist. std::vector sources = media_channel->GetSources(kSsrcY); EXPECT_EQ(0u, sources.size()); } @@ -3656,10 +3663,8 @@ TEST(WebRtcVoiceEngineTest, StartupShutdownWithExternalADM) { for (bool use_null_apm : {false, true}) { std::unique_ptr task_queue_factory = webrtc::CreateDefaultTaskQueueFactory(); - rtc::scoped_refptr>> - adm(new rtc::RefCountedObject< - ::testing::NiceMock>()); + auto adm = rtc::make_ref_counted< + ::testing::NiceMock>(); { rtc::scoped_refptr apm = use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create(); @@ -3682,7 +3687,8 @@ TEST(WebRtcVoiceEngineTest, StartupShutdownWithExternalADM) { delete channel; } // The engine/channel should have dropped their references. - EXPECT_TRUE(adm->HasOneRef()); + EXPECT_EQ(adm.release()->Release(), + rtc::RefCountReleaseStatus::kDroppedLastRef); } } @@ -3844,7 +3850,7 @@ TEST(WebRtcVoiceEngineTest, CollectRecvCodecs) { rtc::scoped_refptr unused_encoder_factory = webrtc::MockAudioEncoderFactory::CreateUnusedFactory(); rtc::scoped_refptr mock_decoder_factory = - new rtc::RefCountedObject; + rtc::make_ref_counted(); EXPECT_CALL(*mock_decoder_factory.get(), GetSupportedDecoders()) .WillOnce(Return(specs)); rtc::scoped_refptr adm = diff --git a/media/sctp/dcsctp_transport.cc b/media/sctp/dcsctp_transport.cc index 3b89af1ec2..38d2799f36 100644 --- a/media/sctp/dcsctp_transport.cc +++ b/media/sctp/dcsctp_transport.cc @@ -26,6 +26,7 @@ #include "p2p/base/packet_transport_internal.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" +#include "rtc_base/socket.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/thread.h" #include "rtc_base/trace_event.h" @@ -34,6 +35,14 @@ namespace webrtc { namespace { +using ::dcsctp::SendPacketStatus; + +// When there is packet loss for a long time, the SCTP retry timers will use +// exponential backoff, which can grow to very long durations and when the +// connection recovers, it may take a long time to reach the new backoff +// duration. By limiting it to a reasonable limit, the time to recover reduces. +constexpr dcsctp::DurationMs kMaxTimerBackoffDuration = + dcsctp::DurationMs(3000); enum class WebrtcPPID : dcsctp::PPID::UnderlyingType { // https://www.rfc-editor.org/rfc/rfc8832.html#section-8.1 @@ -154,6 +163,10 @@ bool DcSctpTransport::Start(int local_sctp_port, options.local_port = local_sctp_port; options.remote_port = remote_sctp_port; options.max_message_size = max_message_size; + options.max_timer_backoff_duration = kMaxTimerBackoffDuration; + // Don't close the connection automatically on too many retransmissions. 
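To make the effect of kMaxTimerBackoffDuration concrete: with uncapped exponential backoff, a hypothetical initial RTO of 1 second exceeds 17 minutes after ten consecutive losses, so a link that has recovered may still sit out a very long retry timer. A toy calculation (values illustrative only; dcsctp's real RTO logic also adapts to measured RTT):

#include <algorithm>
#include <cstdio>

int main() {
  int rto_ms = 1000;        // Hypothetical initial retransmission timeout.
  const int kCapMs = 3000;  // Mirrors kMaxTimerBackoffDuration above.
  for (int retry = 1; retry <= 10; ++retry) {
    rto_ms *= 2;  // Exponential backoff doubles the timer on each loss.
    std::printf("retry %2d: uncapped %7d ms, capped %d ms\n", retry, rto_ms,
                std::min(rto_ms, kCapMs));
  }
  return 0;
}

The two absl::nullopt assignments just below complement the cap: the association survives arbitrarily long loss bursts instead of closing after a fixed retransmission count.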
+ options.max_retransmissions = absl::nullopt; + options.max_init_retransmits = absl::nullopt; std::unique_ptr packet_observer; if (RTC_LOG_CHECK_LEVEL(LS_VERBOSE)) { @@ -308,7 +321,8 @@ void DcSctpTransport::set_debug_name_for_testing(const char* debug_name) { debug_name_ = debug_name; } -void DcSctpTransport::SendPacket(rtc::ArrayView data) { +SendPacketStatus DcSctpTransport::SendPacketWithStatus( + rtc::ArrayView data) { RTC_DCHECK_RUN_ON(network_thread_); RTC_DCHECK(socket_); @@ -318,15 +332,15 @@ void DcSctpTransport::SendPacket(rtc::ArrayView data) { "SCTP seems to have made a packet that is bigger " "than its official MTU: " << data.size() << " vs max of " << socket_->options().mtu; - return; + return SendPacketStatus::kError; } TRACE_EVENT0("webrtc", "DcSctpTransport::SendPacket"); if (!transport_ || !transport_->writable()) - return; + return SendPacketStatus::kError; - RTC_LOG(LS_VERBOSE) << debug_name_ << "->SendPacket(length=" << data.size() - << ")"; + RTC_DLOG(LS_VERBOSE) << debug_name_ << "->SendPacket(length=" << data.size() + << ")"; auto result = transport_->SendPacket(reinterpret_cast(data.data()), @@ -336,11 +350,18 @@ void DcSctpTransport::SendPacket(rtc::ArrayView data) { RTC_LOG(LS_WARNING) << debug_name_ << "->SendPacket(length=" << data.size() << ") failed with error: " << transport_->GetError() << "."; + + if (rtc::IsBlockingError(transport_->GetError())) { + return SendPacketStatus::kTemporaryFailure; + } + return SendPacketStatus::kError; } + return SendPacketStatus::kSuccess; } -std::unique_ptr DcSctpTransport::CreateTimeout() { - return task_queue_timeout_factory_.CreateTimeout(); +std::unique_ptr DcSctpTransport::CreateTimeout( + webrtc::TaskQueueBase::DelayPrecision precision) { + return task_queue_timeout_factory_.CreateTimeout(precision); } dcsctp::TimeMs DcSctpTransport::TimeMillis() { @@ -386,9 +407,18 @@ void DcSctpTransport::OnMessageReceived(dcsctp::DcSctpMessage message) { void DcSctpTransport::OnError(dcsctp::ErrorKind error, absl::string_view message) { - RTC_LOG(LS_ERROR) << debug_name_ - << "->OnError(error=" << dcsctp::ToString(error) - << ", message=" << message << ")."; + if (error == dcsctp::ErrorKind::kResourceExhaustion) { + // Indicates that a message failed to be enqueued, because the send buffer + // is full, which is a very common (and wanted) state for high throughput + // sending/benchmarks. + RTC_LOG(LS_VERBOSE) << debug_name_ + << "->OnError(error=" << dcsctp::ToString(error) + << ", message=" << message << ")."; + } else { + RTC_LOG(LS_ERROR) << debug_name_ + << "->OnError(error=" << dcsctp::ToString(error) + << ", message=" << message << ")."; + } } void DcSctpTransport::OnAborted(dcsctp::ErrorKind error, @@ -496,13 +526,14 @@ void DcSctpTransport::OnTransportReadPacket( size_t length, const int64_t& /* packet_time_us */, int flags) { + RTC_DCHECK_RUN_ON(network_thread_); if (flags) { // We are only interested in SCTP packets. 
return; } - RTC_LOG(LS_VERBOSE) << debug_name_ - << "->OnTransportReadPacket(), length=" << length; + RTC_DLOG(LS_VERBOSE) << debug_name_ + << "->OnTransportReadPacket(), length=" << length; if (socket_) { socket_->ReceivePacket(rtc::ArrayView( reinterpret_cast(data), length)); diff --git a/media/sctp/dcsctp_transport.h b/media/sctp/dcsctp_transport.h index 15933383b5..11c2f829c5 100644 --- a/media/sctp/dcsctp_transport.h +++ b/media/sctp/dcsctp_transport.h @@ -17,6 +17,7 @@ #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "api/array_view.h" +#include "api/task_queue/task_queue_base.h" #include "media/sctp/sctp_transport_internal.h" #include "net/dcsctp/public/dcsctp_options.h" #include "net/dcsctp/public/dcsctp_socket.h" @@ -59,8 +60,10 @@ class DcSctpTransport : public cricket::SctpTransportInternal, private: // dcsctp::DcSctpSocketCallbacks - void SendPacket(rtc::ArrayView data) override; - std::unique_ptr CreateTimeout() override; + dcsctp::SendPacketStatus SendPacketWithStatus( + rtc::ArrayView data) override; + std::unique_ptr CreateTimeout( + webrtc::TaskQueueBase::DelayPrecision precision) override; dcsctp::TimeMs TimeMillis() override; uint32_t GetRandomInt(uint32_t low, uint32_t high) override; void OnTotalBufferedAmountLow() override; diff --git a/media/sctp/sctp_transport_factory.cc b/media/sctp/sctp_transport_factory.cc index 5097d423d9..071d7fdb23 100644 --- a/media/sctp/sctp_transport_factory.cc +++ b/media/sctp/sctp_transport_factory.cc @@ -25,11 +25,11 @@ namespace cricket { SctpTransportFactory::SctpTransportFactory(rtc::Thread* network_thread) - : network_thread_(network_thread), use_dcsctp_("Enabled", false) { + : network_thread_(network_thread), use_usrsctp_("Disabled", false) { RTC_UNUSED(network_thread_); #ifdef WEBRTC_HAVE_DCSCTP - webrtc::ParseFieldTrial({&use_dcsctp_}, webrtc::field_trial::FindFullName( - "WebRTC-DataChannel-Dcsctp")); + webrtc::ParseFieldTrial({&use_usrsctp_}, webrtc::field_trial::FindFullName( + "WebRTC-DataChannel-Dcsctp")); #endif } @@ -38,7 +38,7 @@ SctpTransportFactory::CreateSctpTransport( rtc::PacketTransportInternal* transport) { std::unique_ptr result; #ifdef WEBRTC_HAVE_DCSCTP - if (use_dcsctp_.Get()) { + if (!use_usrsctp_.Get()) { result = std::unique_ptr(new webrtc::DcSctpTransport( network_thread_, transport, webrtc::Clock::GetRealTimeClock())); } diff --git a/media/sctp/sctp_transport_factory.h b/media/sctp/sctp_transport_factory.h index ed7c2163d7..9ae246a6a6 100644 --- a/media/sctp/sctp_transport_factory.h +++ b/media/sctp/sctp_transport_factory.h @@ -29,7 +29,7 @@ class SctpTransportFactory : public webrtc::SctpTransportFactoryInterface { private: rtc::Thread* network_thread_; - webrtc::FieldTrialFlag use_dcsctp_; + webrtc::FieldTrialFlag use_usrsctp_; }; } // namespace cricket diff --git a/media/sctp/sctp_transport_internal.h b/media/sctp/sctp_transport_internal.h index b1327165b6..93a59b9dc7 100644 --- a/media/sctp/sctp_transport_internal.h +++ b/media/sctp/sctp_transport_internal.h @@ -86,11 +86,11 @@ class SctpTransportInternal { // completes. This method can be called multiple times, though not if either // of the ports are changed. // - // |local_sctp_port| and |remote_sctp_port| are passed along the wire and the + // `local_sctp_port` and `remote_sctp_port` are passed along the wire and the // listener and connector must be using the same port. They are not related // to the ports at the IP level. If set to -1, we default to // kSctpDefaultPort. 
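Note the flag inversion in SctpTransportFactory above: use_dcsctp_("Enabled", false) became use_usrsctp_("Disabled", false), turning the WebRTC-DataChannel-Dcsctp trial from an opt-in into a kill switch. A reduced sketch of the selection (a plain string comparison stands in for webrtc::FieldTrialFlag, which tokenizes the full trial string):

#include <string>

// Returns true only when the trial group is explicitly "Disabled"; any
// other value, including the empty default, selects dcsctp.
bool UseUsrsctp(const std::string& trial_group) {
  return trial_group == "Disabled";
}

// UseUsrsctp("") == false          -> dcsctp, the new default.
// UseUsrsctp("Enabled") == false   -> dcsctp.
// UseUsrsctp("Disabled") == true   -> legacy usrsctp.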
- // |max_message_size_| sets the max message size on the connection.
+ // `max_message_size_` sets the max message size on the connection.
 // It must be smaller than or equal to kSctpSendBufferSize.
 // It can be changed by a second Start() call.
 //
@@ -104,10 +104,10 @@ class SctpTransportInternal {
 // NOTE: Initially there was a "Stop" method here, but it was never used, so
 // it was removed.
- // Informs SctpTransport that |sid| will start being used. Returns false if
- // it is impossible to use |sid|, or if it's already in use.
- // Until calling this, can't send data using |sid|.
- // TODO(deadbeef): Actually implement the "returns false if |sid| can't be
+ // Informs SctpTransport that `sid` will start being used. Returns false if
+ // it is impossible to use `sid`, or if it's already in use.
+ // Until calling this, can't send data using `sid`.
+ // TODO(deadbeef): Actually implement the "returns false if `sid` can't be
 // used" part. See:
 // https://bugs.chromium.org/p/chromium/issues/detail?id=619849
 virtual bool OpenStream(int sid) = 0;
@@ -119,7 +119,7 @@ class SctpTransportInternal {
 // Send data down this channel (will be wrapped as SCTP packets, then given to
 // usrsctp, which posts them to the network interface).
 // Returns true iff the data was successfully accepted somewhere on the
 // send-queue/network.
- // Uses |params.ssrc| as the SCTP sid.
+ // Uses `params.ssrc` as the SCTP sid.
 virtual bool SendData(int sid,
                       const webrtc::SendDataParams& params,
                       const rtc::CopyOnWriteBuffer& payload,
diff --git a/media/sctp/usrsctp_transport.cc b/media/sctp/usrsctp_transport.cc
index 7824a72934..4babf110a2 100644
--- a/media/sctp/usrsctp_transport.cc
+++ b/media/sctp/usrsctp_transport.cc
@@ -304,7 +304,7 @@ class UsrsctpTransportMap {
 return map_.erase(id) > 0;
 }
- // Posts |action| to the network thread of the transport identified by |id|
+ // Posts `action` to the network thread of the transport identified by `id`
 // and returns true if found, all while holding a lock to protect against the
 // transport being simultaneously deleted/deregistered, or returns false if
 // not found.
@@ -490,114 +490,54 @@ class UsrsctpTransport::UsrSctpWrapper {
 void* ulp_info) {
 AutoFreedPointer owned_data(data);
- absl::optional<uintptr_t> id = GetTransportIdFromSocket(sock);
- if (!id) {
- RTC_LOG(LS_ERROR)
- << "OnSctpInboundPacket: Failed to get transport ID from socket "
- << sock;
- return kSctpErrorReturn;
- }
-
 if (!g_transport_map_) {
 RTC_LOG(LS_ERROR)
 << "OnSctpInboundPacket called after usrsctp uninitialized?";
 return kSctpErrorReturn;
 }
+
+ uintptr_t id = reinterpret_cast<uintptr_t>(ulp_info);
+
 // PostToTransportThread protects against the transport being
 // simultaneously deregistered/deleted, since this callback may come from
 // the SCTP timer thread and thus race with the network thread. 
bool found = g_transport_map_->PostToTransportThread( - *id, [owned_data{std::move(owned_data)}, length, rcv, - flags](UsrsctpTransport* transport) { + id, [owned_data{std::move(owned_data)}, length, rcv, + flags](UsrsctpTransport* transport) { transport->OnDataOrNotificationFromSctp(owned_data.get(), length, rcv, flags); }); if (!found) { RTC_LOG(LS_ERROR) - << "OnSctpInboundPacket: Failed to get transport for socket ID " - << *id << "; possibly was already destroyed."; + << "OnSctpInboundPacket: Failed to get transport for socket ID " << id + << "; possibly was already destroyed."; return kSctpErrorReturn; } return kSctpSuccessReturn; } - static absl::optional GetTransportIdFromSocket( - struct socket* sock) { - absl::optional ret; - struct sockaddr* addrs = nullptr; - int naddrs = usrsctp_getladdrs(sock, 0, &addrs); - if (naddrs <= 0 || addrs[0].sa_family != AF_CONN) { - return ret; - } - // usrsctp_getladdrs() returns the addresses bound to this socket, which - // contains the UsrsctpTransport id as sconn_addr. Read the id, - // then free the list of addresses once we have the pointer. We only open - // AF_CONN sockets, and they should all have the sconn_addr set to the - // id of the transport that created them, so [0] is as good as any other. - struct sockaddr_conn* sconn = - reinterpret_cast(&addrs[0]); - ret = reinterpret_cast(sconn->sconn_addr); - usrsctp_freeladdrs(addrs); - - return ret; - } - - // TODO(crbug.com/webrtc/11899): This is a legacy callback signature, remove - // when usrsctp is updated. - static int SendThresholdCallback(struct socket* sock, uint32_t sb_free) { - // Fired on our I/O thread. UsrsctpTransport::OnPacketReceived() gets - // a packet containing acknowledgments, which goes into usrsctp_conninput, - // and then back here. - absl::optional id = GetTransportIdFromSocket(sock); - if (!id) { - RTC_LOG(LS_ERROR) - << "SendThresholdCallback: Failed to get transport ID from socket " - << sock; - return 0; - } - if (!g_transport_map_) { - RTC_LOG(LS_ERROR) - << "SendThresholdCallback called after usrsctp uninitialized?"; - return 0; - } - bool found = g_transport_map_->PostToTransportThread( - *id, [](UsrsctpTransport* transport) { - transport->OnSendThresholdCallback(); - }); - if (!found) { - RTC_LOG(LS_ERROR) - << "SendThresholdCallback: Failed to get transport for socket ID " - << *id << "; possibly was already destroyed."; - } - return 0; - } - static int SendThresholdCallback(struct socket* sock, uint32_t sb_free, void* ulp_info) { // Fired on our I/O thread. UsrsctpTransport::OnPacketReceived() gets // a packet containing acknowledgments, which goes into usrsctp_conninput, // and then back here. 
- absl::optional<uintptr_t> id = GetTransportIdFromSocket(sock);
- if (!id) {
- RTC_LOG(LS_ERROR)
- << "SendThresholdCallback: Failed to get transport ID from socket "
- << sock;
- return 0;
- }
 if (!g_transport_map_) {
 RTC_LOG(LS_ERROR)
 << "SendThresholdCallback called after usrsctp uninitialized?";
 return 0;
 }
+
+ uintptr_t id = reinterpret_cast<uintptr_t>(ulp_info);
+
 bool found = g_transport_map_->PostToTransportThread(
- *id, [](UsrsctpTransport* transport) {
+ id, [](UsrsctpTransport* transport) {
 transport->OnSendThresholdCallback();
 });
 if (!found) {
 RTC_LOG(LS_ERROR)
 << "SendThresholdCallback: Failed to get transport for socket ID "
- << *id << "; possibly was already destroyed.";
+ << id << "; possibly was already destroyed.";
 }
 return 0;
 }
@@ -848,8 +788,8 @@ SendDataResult UsrsctpTransport::SendMessageInternal(OutgoingMessage* message) {
 if (send_res < 0) {
 if (errno == SCTP_EWOULDBLOCK) {
 ready_to_send_data_ = false;
- RTC_LOG(LS_INFO) << debug_name_
- << "->SendMessageInternal(...): EWOULDBLOCK returned";
+ RTC_LOG(LS_VERBOSE) << debug_name_
+ << "->SendMessageInternal(...): EWOULDBLOCK returned";
 return SDR_BLOCK;
 }
@@ -977,7 +917,7 @@ bool UsrsctpTransport::OpenSctpSocket() {
 sock_ = usrsctp_socket(
 AF_CONN, SOCK_STREAM, IPPROTO_SCTP, &UsrSctpWrapper::OnSctpInboundPacket,
- &UsrSctpWrapper::SendThresholdCallback, kSendThreshold, this);
+ &UsrSctpWrapper::SendThresholdCallback, kSendThreshold, nullptr);
 if (!sock_) {
 RTC_LOG_ERRNO(LS_ERROR) << debug_name_
 << "->OpenSctpSocket(): "
@@ -993,6 +933,7 @@
 return false;
 }
 id_ = g_transport_map_->Register(this);
+ usrsctp_set_ulpinfo(sock_, reinterpret_cast<void*>(id_));
 // Register our id as an address for usrsctp. This is used by SCTP to
 // direct the packets received (by the created socket) to this class.
 usrsctp_register_address(reinterpret_cast<void*>(id_));
@@ -1231,8 +1172,8 @@ void UsrsctpTransport::OnPacketRead(rtc::PacketTransportInternal* transport,
 // packet will have called connect, and a connection will be established.
 if (sock_) {
 // Pass received packet to SCTP stack. Once processed by usrsctp, the data
- // will be will be given to the global OnSctpInboundData, and then,
- // marshalled by the AsyncInvoker.
+ // will be given to the global OnSctpInboundPacket callback and
+ // posted to the transport thread.
 VerboseLogPacket(data, len, SCTP_DUMP_INBOUND);
 usrsctp_conninput(reinterpret_cast<void*>(id_), data, len, 0);
 } else {
diff --git a/media/sctp/usrsctp_transport.h b/media/sctp/usrsctp_transport.h
index 5dcf57b243..2dd6abf9c5 100644
--- a/media/sctp/usrsctp_transport.h
+++ b/media/sctp/usrsctp_transport.h
@@ -22,7 +22,6 @@
 #include "absl/types/optional.h"
 #include "rtc_base/buffer.h"
-#include "rtc_base/constructor_magic.h"
 #include "rtc_base/copy_on_write_buffer.h"
 #include "rtc_base/task_utils/pending_task_safety_flag.h"
 #include "rtc_base/third_party/sigslot/sigslot.h"
@@ -68,14 +67,17 @@ struct SctpInboundPacket;
 class UsrsctpTransport : public SctpTransportInternal,
 public sigslot::has_slots<> {
 public:
- // |network_thread| is where packets will be processed and callbacks from
+ // `network_thread` is where packets will be processed and callbacks from
 // this transport will be posted, and is the only thread on which public
 // methods can be called.
- // |transport| is not required (can be null).
+ // `transport` is not required (can be null). 
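With GetTransportIdFromSocket() removed, both static callbacks recover their transport from the uintptr_t cookie stored via usrsctp_set_ulpinfo() above. The underlying pattern is a lock-protected id-to-object registry that fails safely when the object is already gone; a minimal sketch with standard types (the real UsrsctpTransportMap additionally posts to the transport's network thread while holding the lock):

#include <cstdint>
#include <map>
#include <mutex>

class IdRegistry {
 public:
  // Stores `object` and hands back an id that is safe to stash in a C
  // callback cookie such as usrsctp's ulp_info.
  uintptr_t Register(void* object) {
    std::lock_guard<std::mutex> lock(mutex_);
    uintptr_t id = next_id_++;
    map_[id] = object;
    return id;
  }

  bool Deregister(uintptr_t id) {
    std::lock_guard<std::mutex> lock(mutex_);
    return map_.erase(id) > 0;
  }

  // Returns nullptr if the object was already deregistered, which is how a
  // late callback detects a destroyed transport instead of crashing.
  void* Retrieve(uintptr_t id) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = map_.find(id);
    return it == map_.end() ? nullptr : it->second;
  }

 private:
  std::mutex mutex_;
  uintptr_t next_id_ = 1;
  std::map<uintptr_t, void*> map_;
};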
UsrsctpTransport(rtc::Thread* network_thread, rtc::PacketTransportInternal* transport); ~UsrsctpTransport() override; + UsrsctpTransport(const UsrsctpTransport&) = delete; + UsrsctpTransport& operator=(const UsrsctpTransport&) = delete; + // SctpTransportInternal overrides (see sctptransportinternal.h for comments). void SetDtlsTransport(rtc::PacketTransportInternal* transport) override; bool Start(int local_port, int remote_port, int max_message_size) override; @@ -163,7 +165,7 @@ class UsrsctpTransport : public SctpTransportInternal, // buffered message was accepted by the sctp lib. bool SendBufferedMessage(); - // Tries to send the |payload| on the usrsctp lib. The message will be + // Tries to send the `payload` on the usrsctp lib. The message will be // advanced by the amount that was sent. SendDataResult SendMessageInternal(OutgoingMessage* message); @@ -180,7 +182,7 @@ class UsrsctpTransport : public SctpTransportInternal, void OnSendThresholdCallback(); sockaddr_conn GetSctpSockAddr(int port); - // Called using |invoker_| to send packet on the network. + // Called using `invoker_` to send packet on the network. void OnPacketFromSctpToNetwork(const rtc::CopyOnWriteBuffer& buffer); // Called on the network thread. @@ -189,10 +191,10 @@ class UsrsctpTransport : public SctpTransportInternal, size_t length, struct sctp_rcvinfo rcv, int flags); - // Called using |invoker_| to decide what to do with the data. + // Called using `invoker_` to decide what to do with the data. void OnDataFromSctpToTransport(const ReceiveDataParams& params, const rtc::CopyOnWriteBuffer& buffer); - // Called using |invoker_| to decide what to do with the notification. + // Called using `invoker_` to decide what to do with the notification. void OnNotificationFromSctp(const rtc::CopyOnWriteBuffer& buffer); void OnNotificationAssocChange(const sctp_assoc_change& change); @@ -226,7 +228,7 @@ class UsrsctpTransport : public SctpTransportInternal, // Has Start been called? Don't create SCTP socket until it has. bool started_ = false; // Are we ready to queue data (SCTP socket created, and not blocked due to - // congestion control)? Different than |transport_|'s "ready to send". + // congestion control)? Different than `transport_`'s "ready to send". bool ready_to_send_data_ = false; // Used to keep track of the status of each stream (or rather, each pair of @@ -268,7 +270,7 @@ class UsrsctpTransport : public SctpTransportInternal, } }; - // Entries should only be removed from this map if |reset_complete| is + // Entries should only be removed from this map if `reset_complete` is // true. 
std::map stream_status_by_sid_; @@ -285,8 +287,6 @@ class UsrsctpTransport : public SctpTransportInternal, uintptr_t id_ = 0; friend class UsrsctpTransportMap; - - RTC_DISALLOW_COPY_AND_ASSIGN(UsrsctpTransport); }; class UsrsctpTransportMap; diff --git a/media/sctp/usrsctp_transport_reliability_unittest.cc b/media/sctp/usrsctp_transport_reliability_unittest.cc index 104e320398..987dd04358 100644 --- a/media/sctp/usrsctp_transport_reliability_unittest.cc +++ b/media/sctp/usrsctp_transport_reliability_unittest.cc @@ -58,6 +58,9 @@ class SimulatedPacketTransport final : public rtc::PacketTransportInternal { SignalWritableState(this); } + SimulatedPacketTransport(const SimulatedPacketTransport&) = delete; + SimulatedPacketTransport& operator=(const SimulatedPacketTransport&) = delete; + const std::string& transport_name() const override { return transport_name_; } bool writable() const override { return destination_ != nullptr; } @@ -129,7 +132,6 @@ class SimulatedPacketTransport final : public rtc::PacketTransportInternal { std::atomic destination_ ATOMIC_VAR_INIT(nullptr); webrtc::Random random_; webrtc::ScopedTaskSafety task_safety_; - RTC_DISALLOW_COPY_AND_ASSIGN(SimulatedPacketTransport); }; /** @@ -156,6 +158,9 @@ class SctpDataSender final { RTC_DCHECK(transport_); } + SctpDataSender(const SctpDataSender&) = delete; + SctpDataSender& operator=(const SctpDataSender&) = delete; + void Start() { thread_->PostTask(ToQueuedTask(task_safety_.flag(), [this] { if (started_) { @@ -236,7 +241,6 @@ class SctpDataSender final { std::atomic num_bytes_sent_ ATOMIC_VAR_INIT(0); absl::optional last_error_; webrtc::ScopedTaskSafetyDetached task_safety_; - RTC_DISALLOW_COPY_AND_ASSIGN(SctpDataSender); }; /** @@ -251,6 +255,9 @@ class SctpDataReceiver final : public sigslot::has_slots<> { : receiver_id_(receiver_id), target_messages_count_(target_messages_count) {} + SctpDataReceiver(const SctpDataReceiver&) = delete; + SctpDataReceiver& operator=(const SctpDataReceiver&) = delete; + void OnDataReceived(const cricket::ReceiveDataParams& params, const rtc::CopyOnWriteBuffer& data) { num_bytes_received_ += data.size(); @@ -259,8 +266,8 @@ class SctpDataReceiver final : public sigslot::has_slots<> { } if (num_messages_received_ % kLogPerMessagesCount == 0) { - RTC_LOG(INFO) << receiver_id_ << " receiver got " - << num_messages_received_ << " messages"; + RTC_LOG(LS_INFO) << receiver_id_ << " receiver got " + << num_messages_received_ << " messages"; } } @@ -278,7 +285,6 @@ class SctpDataReceiver final : public sigslot::has_slots<> { rtc::Event received_target_messages_count_{true, false}; const uint32_t receiver_id_; const uint64_t target_messages_count_; - RTC_DISALLOW_COPY_AND_ASSIGN(SctpDataReceiver); }; /** @@ -297,6 +303,9 @@ class ThreadPool final { } } + ThreadPool(const ThreadPool&) = delete; + ThreadPool& operator=(const ThreadPool&) = delete; + rtc::Thread* GetRandomThread() { return threads_[random_.Rand(0U, threads_.size() - 1)].get(); } @@ -304,7 +313,6 @@ class ThreadPool final { private: webrtc::Random random_; std::vector> threads_; - RTC_DISALLOW_COPY_AND_ASSIGN(ThreadPool); }; /** @@ -360,6 +368,9 @@ class SctpPingPong final { }); } + SctpPingPong(const SctpPingPong&) = delete; + SctpPingPong& operator=(const SctpPingPong&) = delete; + bool Start() { CreateTwoConnectedSctpTransportsWithAllStreams(); @@ -577,7 +588,6 @@ class SctpPingPong final { const uint8_t packet_loss_percents_; const uint16_t avg_send_delay_millis_; const webrtc::SendDataParams send_params_; - 
RTC_DISALLOW_COPY_AND_ASSIGN(SctpPingPong); }; /** diff --git a/modules/async_audio_processing/async_audio_processing.h b/modules/async_audio_processing/async_audio_processing.h index 7e09d69f15..bbd0f69b1b 100644 --- a/modules/async_audio_processing/async_audio_processing.h +++ b/modules/async_audio_processing/async_audio_processing.h @@ -53,16 +53,16 @@ class AsyncAudioProcessing final { ~AsyncAudioProcessing(); // Creates AsyncAudioProcessing which will pass audio frames to - // |frame_processor| on |task_queue_| and reply with processed frames passed - // into |on_frame_processed_callback|, which is posted back onto - // |task_queue_|. |task_queue_| is created using the provided - // |task_queue_factory|. + // `frame_processor` on `task_queue_` and reply with processed frames passed + // into `on_frame_processed_callback`, which is posted back onto + // `task_queue_`. `task_queue_` is created using the provided + // `task_queue_factory`. AsyncAudioProcessing( AudioFrameProcessor& frame_processor, TaskQueueFactory& task_queue_factory, AudioFrameProcessor::OnAudioFrameCallback on_frame_processed_callback); - // Accepts |frame| for asynchronous processing. Thread-safe. + // Accepts `frame` for asynchronous processing. Thread-safe. void Process(std::unique_ptr frame); private: diff --git a/modules/audio_coding/BUILD.gn b/modules/audio_coding/BUILD.gn index d1d17267e5..0d5e429d42 100644 --- a/modules/audio_coding/BUILD.gn +++ b/modules/audio_coding/BUILD.gn @@ -974,6 +974,10 @@ rtc_library("neteq") { "neteq/random_vector.h", "neteq/red_payload_splitter.cc", "neteq/red_payload_splitter.h", + "neteq/relative_arrival_delay_tracker.cc", + "neteq/relative_arrival_delay_tracker.h", + "neteq/reorder_optimizer.cc", + "neteq/reorder_optimizer.h", "neteq/statistics_calculator.cc", "neteq/statistics_calculator.h", "neteq/sync_buffer.cc", @@ -982,12 +986,13 @@ rtc_library("neteq") { "neteq/time_stretch.h", "neteq/timestamp_scaler.cc", "neteq/timestamp_scaler.h", + "neteq/underrun_optimizer.cc", + "neteq/underrun_optimizer.h", ] deps = [ ":audio_coding_module_typedefs", ":webrtc_cng", - "..:module_api", "..:module_api_public", "../../api:array_view", "../../api:rtp_headers", @@ -1106,7 +1111,6 @@ rtc_library("neteq_test_tools") { "../../rtc_base:rtc_base_approved", "../../rtc_base/system:arch", "../../test:rtp_test_utils", - "../rtp_rtcp", "../rtp_rtcp:rtp_rtcp_format", ] absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] @@ -1314,7 +1318,6 @@ if (rtc_include_tests) { ":g722_test", ":ilbc_test", ":isac_api_test", - ":isac_fix_test", ":isac_switch_samprate_test", ":isac_test", ":neteq_ilbc_quality_test", @@ -1434,7 +1437,6 @@ if (rtc_include_tests) { deps = audio_coding_deps + [ "../../api:scoped_refptr", - "..:module_api", ":audio_coding", "../../api/audio_codecs:audio_codecs_api", "../../api/audio_codecs:builtin_audio_decoder_factory", @@ -1833,20 +1835,6 @@ if (rtc_include_tests) { "//third_party/abseil-cpp/absl/flags:flag", ] } - - rtc_test("isac_fix_test") { - testonly = true - - sources = [ "codecs/isac/fix/test/kenny.cc" ] - - deps = [ - ":isac_fix", - "../../test:perf_test", - "../../test:test_support", - ] - - data = [ "../../resources/speech_and_misc_wb.pcm" ] - } } rtc_library("isac_test_util") { @@ -2012,12 +2000,15 @@ if (rtc_include_tests) { "neteq/post_decode_vad_unittest.cc", "neteq/random_vector_unittest.cc", "neteq/red_payload_splitter_unittest.cc", + "neteq/relative_arrival_delay_tracker_unittest.cc", + "neteq/reorder_optimizer_unittest.cc", 
"neteq/statistics_calculator_unittest.cc", "neteq/sync_buffer_unittest.cc", "neteq/time_stretch_unittest.cc", "neteq/timestamp_scaler_unittest.cc", "neteq/tools/input_audio_file_unittest.cc", "neteq/tools/packet_unittest.cc", + "neteq/underrun_optimizer_unittest.cc", ] deps = [ diff --git a/modules/audio_coding/acm2/acm_receive_test.h b/modules/audio_coding/acm2/acm_receive_test.h index 043092c086..2095ef9025 100644 --- a/modules/audio_coding/acm2/acm_receive_test.h +++ b/modules/audio_coding/acm2/acm_receive_test.h @@ -18,7 +18,6 @@ #include "api/audio_codecs/audio_decoder_factory.h" #include "api/scoped_refptr.h" -#include "rtc_base/constructor_magic.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -45,6 +44,9 @@ class AcmReceiveTestOldApi { rtc::scoped_refptr decoder_factory); virtual ~AcmReceiveTestOldApi(); + AcmReceiveTestOldApi(const AcmReceiveTestOldApi&) = delete; + AcmReceiveTestOldApi& operator=(const AcmReceiveTestOldApi&) = delete; + // Registers the codecs with default parameters from ACM. void RegisterDefaultCodecs(); @@ -67,12 +69,10 @@ class AcmReceiveTestOldApi { AudioSink* audio_sink_; int output_freq_hz_; NumOutputChannels exptected_output_channels_; - - RTC_DISALLOW_COPY_AND_ASSIGN(AcmReceiveTestOldApi); }; -// This test toggles the output frequency every |toggle_period_ms|. The test -// starts with |output_freq_hz_1|. Except for the toggling, it does the same +// This test toggles the output frequency every `toggle_period_ms`. The test +// starts with `output_freq_hz_1`. Except for the toggling, it does the same // thing as AcmReceiveTestOldApi. class AcmReceiveTestToggleOutputFreqOldApi : public AcmReceiveTestOldApi { public: diff --git a/modules/audio_coding/acm2/acm_receiver.cc b/modules/audio_coding/acm2/acm_receiver.cc index 3214ce6f7b..aa9816956e 100644 --- a/modules/audio_coding/acm2/acm_receiver.cc +++ b/modules/audio_coding/acm2/acm_receiver.cc @@ -66,14 +66,14 @@ AcmReceiver::~AcmReceiver() = default; int AcmReceiver::SetMinimumDelay(int delay_ms) { if (neteq_->SetMinimumDelay(delay_ms)) return 0; - RTC_LOG(LERROR) << "AcmReceiver::SetExtraDelay " << delay_ms; + RTC_LOG(LS_ERROR) << "AcmReceiver::SetExtraDelay " << delay_ms; return -1; } int AcmReceiver::SetMaximumDelay(int delay_ms) { if (neteq_->SetMaximumDelay(delay_ms)) return 0; - RTC_LOG(LERROR) << "AcmReceiver::SetExtraDelay " << delay_ms; + RTC_LOG(LS_ERROR) << "AcmReceiver::SetExtraDelay " << delay_ms; return -1; } @@ -131,12 +131,12 @@ int AcmReceiver::InsertPacket(const RTPHeader& rtp_header, /*num_channels=*/format->num_channels, /*sdp_format=*/std::move(format->sdp_format)}; } - } // |mutex_| is released. + } // `mutex_` is released. 
if (neteq_->InsertPacket(rtp_header, incoming_payload) < 0) { - RTC_LOG(LERROR) << "AcmReceiver::InsertPacket " - << static_cast(rtp_header.payloadType) - << " Failed to insert packet"; + RTC_LOG(LS_ERROR) << "AcmReceiver::InsertPacket " + << static_cast(rtp_header.payloadType) + << " Failed to insert packet"; return -1; } return 0; @@ -150,7 +150,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz, int current_sample_rate_hz = 0; if (neteq_->GetAudio(audio_frame, muted, ¤t_sample_rate_hz) != NetEq::kOK) { - RTC_LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed."; + RTC_LOG(LS_ERROR) << "AcmReceiver::GetAudio - NetEq Failed."; return -1; } @@ -170,8 +170,8 @@ int AcmReceiver::GetAudio(int desired_freq_hz, audio_frame->num_channels_, AudioFrame::kMaxDataSizeSamples, temp_output); if (samples_per_channel_int < 0) { - RTC_LOG(LERROR) << "AcmReceiver::GetAudio - " - "Resampling last_audio_buffer_ failed."; + RTC_LOG(LS_ERROR) << "AcmReceiver::GetAudio - " + "Resampling last_audio_buffer_ failed."; return -1; } } @@ -185,7 +185,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz, audio_frame->num_channels_, AudioFrame::kMaxDataSizeSamples, audio_frame->mutable_data()); if (samples_per_channel_int < 0) { - RTC_LOG(LERROR) + RTC_LOG(LS_ERROR) << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed."; return -1; } @@ -201,7 +201,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz, // We might end up here ONLY if codec is changed. } - // Store current audio in |last_audio_buffer_| for next time. + // Store current audio in `last_audio_buffer_` for next time. memcpy(last_audio_buffer_.get(), audio_frame->data(), sizeof(int16_t) * audio_frame->samples_per_channel_ * audio_frame->num_channels_); @@ -307,6 +307,8 @@ void AcmReceiver::GetNetworkStatistics( neteq_->GetOperationsAndState(); acm_stat->packetBufferFlushes = neteq_operations_and_state.packet_buffer_flushes; + acm_stat->packetsDiscarded = + neteq_operations_and_state.discarded_primary_packets; } int AcmReceiver::EnableNack(size_t max_nack_list_size) { diff --git a/modules/audio_coding/acm2/acm_receiver.h b/modules/audio_coding/acm2/acm_receiver.h index 19dc57758e..18b662aed0 100644 --- a/modules/audio_coding/acm2/acm_receiver.h +++ b/modules/audio_coding/acm2/acm_receiver.h @@ -177,10 +177,10 @@ class AcmReceiver { // enabled then the maximum NACK list size is modified accordingly. // // If the sequence number of last received packet is N, the sequence numbers - // of NACK list are in the range of [N - |max_nack_list_size|, N). + // of NACK list are in the range of [N - `max_nack_list_size`, N). // - // |max_nack_list_size| should be positive (none zero) and less than or - // equal to |Nack::kNackListSizeLimit|. Otherwise, No change is applied and -1 + // `max_nack_list_size` should be positive (none zero) and less than or + // equal to `Nack::kNackListSizeLimit`. Otherwise, No change is applied and -1 // is returned. 0 is returned at success. // int EnableNack(size_t max_nack_list_size); @@ -189,12 +189,12 @@ class AcmReceiver { void DisableNack(); // - // Get a list of packets to be retransmitted. |round_trip_time_ms| is an + // Get a list of packets to be retransmitted. `round_trip_time_ms` is an // estimate of the round-trip-time (in milliseconds). Missing packets which // will be playout in a shorter time than the round-trip-time (with respect // to the time this API is called) will not be included in the list. 
   //
-  // Negative |round_trip_time_ms| results is an error message and empty list
+  // Negative `round_trip_time_ms` results in an error message and an empty list
   // is returned.
   //
   std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const;
diff --git a/modules/audio_coding/acm2/acm_receiver_unittest.cc b/modules/audio_coding/acm2/acm_receiver_unittest.cc
index 2338a53235..e73acc2338 100644
--- a/modules/audio_coding/acm2/acm_receiver_unittest.cc
+++ b/modules/audio_coding/acm2/acm_receiver_unittest.cc
@@ -119,7 +119,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
         rtp_header_,
         rtc::ArrayView<const uint8_t>(payload_data, payload_len_bytes));
     if (ret_val < 0) {
-      RTC_NOTREACHED();
+      RTC_DCHECK_NOTREACHED();
       return -1;
     }
     rtp_header_.sequenceNumber++;
diff --git a/modules/audio_coding/acm2/acm_resampler.cc b/modules/audio_coding/acm2/acm_resampler.cc
index 367ec2b9cd..e307c6ca57 100644
--- a/modules/audio_coding/acm2/acm_resampler.cc
+++ b/modules/audio_coding/acm2/acm_resampler.cc
@@ -10,7 +10,6 @@

 #include "modules/audio_coding/acm2/acm_resampler.h"

-#include
 #include

 #include "rtc_base/logging.h"
@@ -31,7 +30,7 @@ int ACMResampler::Resample10Msec(const int16_t* in_audio,
   size_t in_length = in_freq_hz * num_audio_channels / 100;
   if (in_freq_hz == out_freq_hz) {
     if (out_capacity_samples < in_length) {
-      RTC_NOTREACHED();
+      RTC_DCHECK_NOTREACHED();
       return -1;
     }
     memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
diff --git a/modules/audio_coding/acm2/acm_send_test.cc b/modules/audio_coding/acm2/acm_send_test.cc
index cda668dab8..a84fa7c3e9 100644
--- a/modules/audio_coding/acm2/acm_send_test.cc
+++ b/modules/audio_coding/acm2/acm_send_test.cc
@@ -10,7 +10,6 @@

 #include "modules/audio_coding/acm2/acm_send_test.h"

-#include
 #include

 #include
diff --git a/modules/audio_coding/acm2/acm_send_test.h b/modules/audio_coding/acm2/acm_send_test.h
index 0c82415d11..b14cb80c6a 100644
--- a/modules/audio_coding/acm2/acm_send_test.h
+++ b/modules/audio_coding/acm2/acm_send_test.h
@@ -17,7 +17,6 @@
 #include "api/audio/audio_frame.h"
 #include "modules/audio_coding/include/audio_coding_module.h"
 #include "modules/audio_coding/neteq/tools/packet_source.h"
-#include "rtc_base/constructor_magic.h"
 #include "system_wrappers/include/clock.h"

 namespace webrtc {
@@ -35,6 +34,9 @@ class AcmSendTestOldApi : public AudioPacketizationCallback,
                     int test_duration_ms);
   ~AcmSendTestOldApi() override;

+  AcmSendTestOldApi(const AcmSendTestOldApi&) = delete;
+  AcmSendTestOldApi& operator=(const AcmSendTestOldApi&) = delete;
+
   // Registers the send codec. Returns true on success, false otherwise.
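These hunks also carry the second recurring migration in this change: RTC_NOTREACHED() becomes RTC_DCHECK_NOTREACHED(). The call-site shape stays the same — the macro documents a branch that should be impossible and fires only in dcheck-enabled builds, while an early return keeps release builds well-defined. A minimal sketch of the pattern (the function is hypothetical; it mirrors the ACMResampler call site above and assumes rtc_base/checks.h provides the macro):

#include <cstddef>
#include <cstdint>
#include <cstring>

#include "rtc_base/checks.h"

int CopySamples(const int16_t* in, size_t in_length,
                int16_t* out, size_t out_capacity) {
  if (out_capacity < in_length) {
    RTC_DCHECK_NOTREACHED();  // Callers guarantee sufficient capacity.
    return -1;                // Graceful failure in release builds.
  }
  memcpy(out, in, in_length * sizeof(int16_t));
  return static_cast<int>(in_length);
}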
   bool RegisterCodec(const char* payload_name,
                      int sampling_freq_hz,
@@ -81,8 +83,6 @@ class AcmSendTestOldApi : public AudioPacketizationCallback,
   uint16_t sequence_number_;
   std::vector<uint8_t> last_payload_vec_;
   bool data_to_send_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(AcmSendTestOldApi);
 };

 }  // namespace test
diff --git a/modules/audio_coding/acm2/audio_coding_module.cc b/modules/audio_coding/acm2/audio_coding_module.cc
index 7d0f4d1e84..e2081e20dc 100644
--- a/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/modules/audio_coding/acm2/audio_coding_module.cc
@@ -10,7 +10,6 @@

 #include "modules/audio_coding/include/audio_coding_module.h"

-#include
 #include

 #include
@@ -93,6 +92,8 @@ class AudioCodingModuleImpl final : public AudioCodingModule {

   ANAStats GetANAStats() const override;

+  int GetTargetBitrate() const override;
+
  private:
   struct InputData {
     InputData() : buffer(kInitialInputDataBufferSize) {}
@@ -126,7 +127,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
   int Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);

-  // TODO(bugs.webrtc.org/10739): change |absolute_capture_timestamp_ms| to
+  // TODO(bugs.webrtc.org/10739): change `absolute_capture_timestamp_ms` to
   // int64_t when it always receives a valid value.
   int Encode(const InputData& input_data,
              absl::optional<int64_t> absolute_capture_timestamp_ms)
@@ -142,8 +143,8 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
   //
   // in_frame: input audio-frame
   // ptr_out: pointer to output audio_frame. If no preprocessing is required
-  //          |ptr_out| will be pointing to |in_frame|, otherwise pointing to
-  //          |preprocess_frame_|.
+  //          `ptr_out` will be pointing to `in_frame`, otherwise pointing to
+  //          `preprocess_frame_`.
   //
   // Return value:
   //   -1: if encountering an error.
@@ -153,7 +154,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
       RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_mutex_);

   // Change required states after starting to receive the codec corresponding
-  // to |index|.
+  // to `index`.
   int UpdateUponReceivingCodec(int index);

   mutable Mutex acm_mutex_;
@@ -230,7 +231,7 @@ int32_t AudioCodingModuleImpl::Encode(
     const InputData& input_data,
     absl::optional<int64_t> absolute_capture_timestamp_ms) {
   // TODO(bugs.webrtc.org/10739): add dcheck that
-  // |audio_frame.absolute_capture_timestamp_ms()| always has a value.
+  // `audio_frame.absolute_capture_timestamp_ms()` always has a value.
   AudioEncoder::EncodedInfo encoded_info;
   uint8_t previous_pltype;
@@ -334,7 +335,7 @@ int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) {
   MutexLock lock(&acm_mutex_);
   int r = Add10MsDataInternal(audio_frame, &input_data_);
   // TODO(bugs.webrtc.org/10739): add dcheck that
-  // |audio_frame.absolute_capture_timestamp_ms()| always has a value.
+  // `audio_frame.absolute_capture_timestamp_ms()` always has a value.
   return r < 0
              ?
r : Encode(input_data_, audio_frame.absolute_capture_timestamp_ms()); @@ -343,13 +344,13 @@ int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) { int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data) { if (audio_frame.samples_per_channel_ == 0) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, payload length is zero"; return -1; } if (audio_frame.sample_rate_hz_ > kMaxInputSampleRateHz) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, input frequency not valid"; return -1; } @@ -398,7 +399,7 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame, // output data if needed. ReMixFrame(*ptr_frame, current_num_channels, &input_data->buffer); - // For pushing data to primary, point the |ptr_audio| to correct buffer. + // For pushing data to primary, point the `ptr_audio` to correct buffer. input_data->audio = input_data->buffer.data(); RTC_DCHECK_GE(input_data->buffer.size(), input_data->length_per_channel * input_data->audio_channel); @@ -415,7 +416,7 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame, // encoder is mono and input is stereo. In case of dual-streaming, both // encoders has to be mono for down-mix to take place. // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing -// is required, |*ptr_out| points to |in_frame|. +// is required, |*ptr_out| points to `in_frame`. // TODO(yujo): Make this more efficient for muted frames. int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, const AudioFrame** ptr_out) { @@ -604,6 +605,14 @@ ANAStats AudioCodingModuleImpl::GetANAStats() const { return ANAStats(); } +int AudioCodingModuleImpl::GetTargetBitrate() const { + MutexLock lock(&acm_mutex_); + if (!encoder_stack_) { + return -1; + } + return encoder_stack_->GetTargetBitrate(); +} + } // namespace AudioCodingModule::Config::Config( diff --git a/modules/audio_coding/acm2/audio_coding_module_unittest.cc b/modules/audio_coding/acm2/audio_coding_module_unittest.cc index 74654565e3..c429cc4723 100644 --- a/modules/audio_coding/acm2/audio_coding_module_unittest.cc +++ b/modules/audio_coding/acm2/audio_coding_module_unittest.cc @@ -342,7 +342,7 @@ TEST_F(AudioCodingModuleTestOldApi, TimestampSeriesContinuesWhenCodecChanges) { // Introduce this class to set different expectations on the number of encoded // bytes. This class expects all encoded packets to be 9 bytes (matching one -// CNG SID frame) or 0 bytes. This test depends on |input_frame_| containing +// CNG SID frame) or 0 bytes. This test depends on `input_frame_` containing // (near-)zero values. It also introduces a way to register comfort noise with // a custom payload type. class AudioCodingModuleTestWithComfortNoiseOldApi @@ -593,7 +593,7 @@ class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi { InsertAudio(); ASSERT_LT(loop_counter++, 10); } - // Set |last_packet_number_| to one less that |num_calls| so that the packet + // Set `last_packet_number_` to one less that `num_calls` so that the packet // will be fetched in the next InsertPacket() call. last_packet_number_ = packet_cb_.num_calls() - 1; @@ -617,7 +617,7 @@ class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi { if (num_calls > last_packet_number_) { // Get the new payload out from the callback handler. 
// Note that since we swap buffers here instead of directly inserting - // a pointer to the data in |packet_cb_|, we avoid locking the callback + // a pointer to the data in `packet_cb_`, we avoid locking the callback // for the duration of the IncomingPacket() call. packet_cb_.SwapBuffers(&last_payload_vec_); ASSERT_GT(last_payload_vec_.size(), 0u); @@ -835,32 +835,6 @@ TEST_F(AcmReRegisterIsacMtTestOldApi, MAYBE_DoTest) { #if !defined(WEBRTC_IOS) class AcmReceiverBitExactnessOldApi : public ::testing::Test { - public: - static std::string PlatformChecksum(std::string others, - std::string win64, - std::string android_arm32, - std::string android_arm64, - std::string android_arm64_clang, - std::string mac_arm64) { -#if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS) - return win64; -#elif defined(WEBRTC_MAC) && defined(WEBRTC_ARCH_ARM64) - return mac_arm64; -#elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM) - return android_arm32; -#elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64) -#if defined(__clang__) - // Android ARM64 with Clang compiler - return android_arm64_clang; -#else - // Android ARM64 with non-Clang compiler - return android_arm64; -#endif // __clang__ -#else - return others; -#endif - } - protected: struct ExternalDecoder { int rtp_payload_type; @@ -917,90 +891,34 @@ class AcmReceiverBitExactnessOldApi : public ::testing::Test { }; #if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \ - defined(WEBRTC_CODEC_ILBC) + defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_LINUX) && \ + defined(WEBRTC_ARCH_X86_64) TEST_F(AcmReceiverBitExactnessOldApi, 8kHzOutput) { - std::string others_checksum_reference = - GetCPUInfo(kAVX2) != 0 ? "e0c966d7b8c36ff60167988fa35d33e0" -// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different -// checksum. -#if defined(WEBRTC_LINUX) && defined(NDEBUG) - : "5af28619e3a3c606b2242c9a12f4f64e"; -#else - : "7d8f6b84abd1e57ec010a53bc2130652"; -#endif - std::string win64_checksum_reference = - GetCPUInfo(kAVX2) != 0 ? "405a50f0bcb8827e20aa944299fc59f6" - : "0ed5830930f5527a01bbec0ba11f8541"; - Run(8000, - PlatformChecksum(others_checksum_reference, win64_checksum_reference, - "b892ed69c38b21b16c132ec2ce03aa7b", - "4598140b5e4f7ee66c5adad609e65a3e", - "5fec8d770778ef7969ec98c56d9eb10f", - "636efe6d0a148f22c5383f356da3deac")); + std::string checksum_reference = GetCPUInfo(kAVX2) != 0 + ? "d8671dd38dab43fc9ca64a45c048c218" + : "4710c99559aec2f9f02a983ba2146f2d"; + Run(/*output_freq_hz=*/8000, checksum_reference); } TEST_F(AcmReceiverBitExactnessOldApi, 16kHzOutput) { - std::string others_checksum_reference = - GetCPUInfo(kAVX2) != 0 ? "a63c578e1195c8420f453962c6d8519c" - -// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different -// checksum. -#if defined(WEBRTC_LINUX) && defined(NDEBUG) - : "f788cc9200ac4a7d498d9081987808a3"; -#else - : "6bac83762c1306b932cd25a560155681"; -#endif - std::string win64_checksum_reference = - GetCPUInfo(kAVX2) != 0 ? "58fd62a5c49ee513f9fa6fe7dbf62c97" - : "0509cf0672f543efb4b050e8cffefb1d"; - Run(16000, - PlatformChecksum(others_checksum_reference, win64_checksum_reference, - "3cea9abbeabbdea9a79719941b241af5", - "f2aad418af974a3b1694d5ae5cc2c3c7", - "9d4b92c31c00e321a4cff29ad002d6a2", - "1e2d1b482fdc924f79a838503ee7ead5")); + std::string checksum_reference = GetCPUInfo(kAVX2) != 0 + ? 
"abcb31509af46545edb4f6700728a4de" + : "70b3217df49834b7093c631531068bd0"; + Run(/*output_freq_hz=*/16000, checksum_reference); } TEST_F(AcmReceiverBitExactnessOldApi, 32kHzOutput) { - std::string others_checksum_reference = - GetCPUInfo(kAVX2) != 0 ? "8775ce387f44dc5ff4a26da295d5ee7c" -// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different -// checksum. -#if defined(WEBRTC_LINUX) && defined(NDEBUG) - : "5b84b2a179cb8533a8f9bcd19612e7f0"; -#else - : "e319222ca47733709f90fdf33c8574db"; -#endif - std::string win64_checksum_reference = - GetCPUInfo(kAVX2) != 0 ? "04ce6a1dac5ffdd8438d804623d0132f" - : "39a4a7a1c455b35baeffb9fd193d7858"; - Run(32000, - PlatformChecksum(others_checksum_reference, win64_checksum_reference, - "4df55b3b62bcbf4328786d474ae87f61", - "100869c8dcde51346c2073e52a272d98", - "ff58d3153d2780a3df6bc2068844cb2d", - "51788e9784a10ae14a030f075a039205")); + std::string checksum_reference = GetCPUInfo(kAVX2) != 0 + ? "8489b7743d6cd1903807ac81e5ee493d" + : "2679e4e596e33259228c62df545eb635"; + Run(/*output_freq_hz=*/32000, checksum_reference); } TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutput) { - std::string others_checksum_reference = - GetCPUInfo(kAVX2) != 0 ? "7a55700b7ca9aa60237db58b33e55606" -// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different -// checksum. -#if defined(WEBRTC_LINUX) && defined(NDEBUG) - : "a2459749062f96297283cce4a8c7e6db"; -#else - : "57d1d316c88279f4f3da3511665069a9"; -#endif - std::string win64_checksum_reference = - GetCPUInfo(kAVX2) != 0 ? "f59833d9b0924f4b0704707dd3589f80" - : "74cbe7345e2b6b45c1e455a5d1e921ca"; - Run(48000, - PlatformChecksum(others_checksum_reference, win64_checksum_reference, - "f52bc7bf0f499c9da25932fdf176c4ec", - "bd44bf97e7899186532f91235cef444d", - "364d403dae55d73cd69e6dbd6b723a4d", - "71bc5c15a151400517c2119d1602ee9f")); + std::string checksum_reference = GetCPUInfo(kAVX2) != 0 + ? "454996a7adb3f62b259a53a09ff624cf" + : "f0148c5ef84e74e019ac7057af839102"; + Run(/*output_freq_hz=*/48000, checksum_reference); } TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutputExternalDecoder) { @@ -1076,27 +994,12 @@ TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutputExternalDecoder) { rtc::scoped_refptr fact_; // Fallback factory. }; - rtc::scoped_refptr> factory( - new rtc::RefCountedObject); - std::string others_checksum_reference = - GetCPUInfo(kAVX2) != 0 ? "7a55700b7ca9aa60237db58b33e55606" -// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different -// checksum. -#if defined(WEBRTC_LINUX) && defined(NDEBUG) - : "a2459749062f96297283cce4a8c7e6db"; -#else - : "57d1d316c88279f4f3da3511665069a9"; -#endif - std::string win64_checksum_reference = - GetCPUInfo(kAVX2) != 0 ? "f59833d9b0924f4b0704707dd3589f80" - : "74cbe7345e2b6b45c1e455a5d1e921ca"; - Run(48000, - PlatformChecksum(others_checksum_reference, win64_checksum_reference, - "f52bc7bf0f499c9da25932fdf176c4ec", - "bd44bf97e7899186532f91235cef444d", - "364d403dae55d73cd69e6dbd6b723a4d", - "71bc5c15a151400517c2119d1602ee9f"), - factory, [](AudioCodingModule* acm) { + auto factory = rtc::make_ref_counted(); + std::string checksum_reference = GetCPUInfo(kAVX2) != 0 + ? 
"454996a7adb3f62b259a53a09ff624cf" + : "f0148c5ef84e74e019ac7057af839102"; + Run(48000, checksum_reference, factory, + [](AudioCodingModule* acm) { acm->SetReceiveCodecs({{0, {"MockPCMu", 8000, 1}}, {103, {"ISAC", 16000, 1}}, {104, {"ISAC", 32000, 1}}, @@ -1140,8 +1043,8 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test, // Sets up the test::AcmSendTest object. Returns true on success, otherwise // false. bool SetUpSender(std::string input_file_name, int source_rate) { - // Note that |audio_source_| will loop forever. The test duration is set - // explicitly by |kTestDurationMs|. + // Note that `audio_source_` will loop forever. The test duration is set + // explicitly by `kTestDurationMs`. audio_source_.reset(new test::InputAudioFile(input_file_name)); send_test_.reset(new test::AcmSendTestOldApi(audio_source_.get(), source_rate, kTestDurationMs)); @@ -1243,7 +1146,7 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test, VerifyPacket(packet.get()); // TODO(henrik.lundin) Save the packet to file as well. - // Pass it on to the caller. The caller becomes the owner of |packet|. + // Pass it on to the caller. The caller becomes the owner of `packet`. return packet; } @@ -1308,245 +1211,182 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test, class AcmSenderBitExactnessNewApi : public AcmSenderBitExactnessOldApi {}; -#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX) +// Run bit exactness tests only for release builds. +#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \ + defined(NDEBUG) && defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) TEST_F(AcmSenderBitExactnessOldApi, IsacWb30ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 480, 480)); - Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( - "2c9cb15d4ed55b5a0cadd04883bc73b0", - "9336a9b993cbd8a751f0e8958e66c89c", - "5c2eb46199994506236f68b2c8e51b0d", - "343f1f42be0607c61e6516aece424609", - "2c9cb15d4ed55b5a0cadd04883bc73b0", - "2c9cb15d4ed55b5a0cadd04883bc73b0"), - AcmReceiverBitExactnessOldApi::PlatformChecksum( - "3c79f16f34218271f3dca4e2b1dfe1bb", - "d42cb5195463da26c8129bbfe73a22e6", - "83de248aea9c3c2bd680b6952401b4ca", - "3c79f16f34218271f3dca4e2b1dfe1bb", - "3c79f16f34218271f3dca4e2b1dfe1bb", - "3c79f16f34218271f3dca4e2b1dfe1bb"), - 33, test::AcmReceiveTestOldApi::kMonoOutput); + Run(/*audio_checksum_ref=*/"a3077ac01b0137e8bbc237fb1f9816a5", + /*payload_checksum_ref=*/"3c79f16f34218271f3dca4e2b1dfe1bb", + /*expected_packets=*/33, + /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput); } TEST_F(AcmSenderBitExactnessOldApi, IsacWb60ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 960, 960)); - Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( - "1ad29139a04782a33daad8c2b9b35875", - "14d63c5f08127d280e722e3191b73bdd", - "9a81e467eb1485f84aca796f8ea65011", - "ef75e900e6f375e3061163c53fd09a63", - "1ad29139a04782a33daad8c2b9b35875", - "1ad29139a04782a33daad8c2b9b35875"), - AcmReceiverBitExactnessOldApi::PlatformChecksum( - "9e0a0ab743ad987b55b8e14802769c56", - "ebe04a819d3a9d83a83a17f271e1139a", - "97aeef98553b5a4b5a68f8b716e8eaf0", - "9e0a0ab743ad987b55b8e14802769c56", - "9e0a0ab743ad987b55b8e14802769c56", - "9e0a0ab743ad987b55b8e14802769c56"), - 16, test::AcmReceiveTestOldApi::kMonoOutput); + Run(/*audio_checksum_ref=*/"76da9b7514f986fc2bb32b1c3170e8d4", + /*payload_checksum_ref=*/"9e0a0ab743ad987b55b8e14802769c56", + /*expected_packets=*/16, + /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput); } #endif -#if 
defined(WEBRTC_ANDROID) -#define MAYBE_IsacSwb30ms DISABLED_IsacSwb30ms -#else -#define MAYBE_IsacSwb30ms IsacSwb30ms -#endif -#if defined(WEBRTC_CODEC_ISAC) -TEST_F(AcmSenderBitExactnessOldApi, MAYBE_IsacSwb30ms) { +// Run bit exactness test only for release build. +#if defined(WEBRTC_CODEC_ISAC) && defined(NDEBUG) && defined(WEBRTC_LINUX) && \ + defined(WEBRTC_ARCH_X86_64) +TEST_F(AcmSenderBitExactnessOldApi, IsacSwb30ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 32000, 1, 104, 960, 960)); - Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( -// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different -// checksum. -#if defined(WEBRTC_LINUX) && defined(NDEBUG) && defined(WEBRTC_ARCH_X86) - "13d4d2a4c9e8e94a4b74a176e4bf7cc4", -#else - "5683b58da0fbf2063c7adc2e6bfb3fb8", -#endif - "2b3c387d06f00b7b7aad4c9be56fb83d", "android_arm32_audio", - "android_arm64_audio", "android_arm64_clang_audio", - "5683b58da0fbf2063c7adc2e6bfb3fb8"), - AcmReceiverBitExactnessOldApi::PlatformChecksum( - "ce86106a93419aefb063097108ec94ab", - "bcc2041e7744c7ebd9f701866856849c", "android_arm32_payload", - "android_arm64_payload", "android_arm64_clang_payload", - "ce86106a93419aefb063097108ec94ab"), - 33, test::AcmReceiveTestOldApi::kMonoOutput); + Run(/*audio_checksum_ref=*/"f4cf577f28a0dcbac33358b757518e0c", + /*payload_checksum_ref=*/"ce86106a93419aefb063097108ec94ab", + /*expected_packets=*/33, + /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput); } #endif TEST_F(AcmSenderBitExactnessOldApi, Pcm16_8000khz_10ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80)); - Run("15396f66b5b0ab6842e151c807395e4c", "c1edd36339ce0326cc4550041ad719a0", - 100, test::AcmReceiveTestOldApi::kMonoOutput); + Run(/*audio_checksum_ref=*/"69118ed438ac76252d023e0463819471", + /*payload_checksum_ref=*/"c1edd36339ce0326cc4550041ad719a0", + /*expected_packets=*/100, + /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput); } TEST_F(AcmSenderBitExactnessOldApi, Pcm16_16000khz_10ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 1, 108, 160, 160)); - Run("54ae004529874c2b362c7f0ccd19cb99", "ad786526383178b08d80d6eee06e9bad", - 100, test::AcmReceiveTestOldApi::kMonoOutput); + Run(/*audio_checksum_ref=*/"bc6ab94d12a464921763d7544fdbd07e", + /*payload_checksum_ref=*/"ad786526383178b08d80d6eee06e9bad", + /*expected_packets=*/100, + /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput); } TEST_F(AcmSenderBitExactnessOldApi, Pcm16_32000khz_10ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 1, 109, 320, 320)); - Run("d6a4a68b8c838dcc1e7ae7136467cdf0", "5ef82ea885e922263606c6fdbc49f651", - 100, test::AcmReceiveTestOldApi::kMonoOutput); + Run(/*audio_checksum_ref=*/"c50244419c5c3a2f04cc69a022c266a2", + /*payload_checksum_ref=*/"5ef82ea885e922263606c6fdbc49f651", + /*expected_packets=*/100, + /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput); } TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_8000khz_10ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 2, 111, 80, 80)); - Run("6b011dab43e3a8a46ccff7e4412ed8a2", "62ce5adb0d4965d0a52ec98ae7f98974", - 100, test::AcmReceiveTestOldApi::kStereoOutput); + Run(/*audio_checksum_ref=*/"4fccf4cc96f1e8e8de4b9fadf62ded9e", + /*payload_checksum_ref=*/"62ce5adb0d4965d0a52ec98ae7f98974", + /*expected_packets=*/100, + /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput); } TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_16000khz_10ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 2, 112, 160, 160)); - 
Run("17fc9854358bfe0419408290664bd78e", "41ca8edac4b8c71cd54fd9f25ec14870", - 100, test::AcmReceiveTestOldApi::kStereoOutput); + Run(/*audio_checksum_ref=*/"e15e388d9d4af8c02a59fe1552fedee3", + /*payload_checksum_ref=*/"41ca8edac4b8c71cd54fd9f25ec14870", + /*expected_packets=*/100, + /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput); } TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_32000khz_10ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 2, 113, 320, 320)); - Run("9ac9a1f64d55da2fc9f3167181cc511d", "50e58502fb04421bf5b857dda4c96879", - 100, test::AcmReceiveTestOldApi::kStereoOutput); + Run(/*audio_checksum_ref=*/"b240520c0d05003fde7a174ae5957286", + /*payload_checksum_ref=*/"50e58502fb04421bf5b857dda4c96879", + /*expected_packets=*/100, + /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput); } TEST_F(AcmSenderBitExactnessOldApi, Pcmu_20ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 1, 0, 160, 160)); - Run("81a9d4c0bb72e9becc43aef124c981e9", "8f9b8750bd80fe26b6cbf6659b89f0f9", - 50, test::AcmReceiveTestOldApi::kMonoOutput); + Run(/*audio_checksum_ref=*/"c8d1fc677f33c2022ec5f83c7f302280", + /*payload_checksum_ref=*/"8f9b8750bd80fe26b6cbf6659b89f0f9", + /*expected_packets=*/50, + /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput); } TEST_F(AcmSenderBitExactnessOldApi, Pcma_20ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 1, 8, 160, 160)); - Run("39611f798969053925a49dc06d08de29", "6ad745e55aa48981bfc790d0eeef2dd1", - 50, test::AcmReceiveTestOldApi::kMonoOutput); + Run(/*audio_checksum_ref=*/"47eb60e855eb12d1b0e6da9c975754a4", + /*payload_checksum_ref=*/"6ad745e55aa48981bfc790d0eeef2dd1", + /*expected_packets=*/50, + /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput); } TEST_F(AcmSenderBitExactnessOldApi, Pcmu_stereo_20ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 2, 110, 160, 160)); - Run("437bec032fdc5cbaa0d5175430af7b18", "60b6f25e8d1e74cb679cfe756dd9bca5", - 50, test::AcmReceiveTestOldApi::kStereoOutput); + Run(/*audio_checksum_ref=*/"6ef2f57d4934714787fd0a834e3ea18e", + /*payload_checksum_ref=*/"60b6f25e8d1e74cb679cfe756dd9bca5", + /*expected_packets=*/50, + /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput); } TEST_F(AcmSenderBitExactnessOldApi, Pcma_stereo_20ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 2, 118, 160, 160)); - Run("a5c6d83c5b7cedbeff734238220a4b0c", "92b282c83efd20e7eeef52ba40842cf7", - 50, test::AcmReceiveTestOldApi::kStereoOutput); + Run(/*audio_checksum_ref=*/"a84d75e098d87ab6b260687eb4b612a2", + /*payload_checksum_ref=*/"92b282c83efd20e7eeef52ba40842cf7", + /*expected_packets=*/50, + /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput); } -#if defined(WEBRTC_ANDROID) -#define MAYBE_Ilbc_30ms DISABLED_Ilbc_30ms -#else -#define MAYBE_Ilbc_30ms Ilbc_30ms -#endif -#if defined(WEBRTC_CODEC_ILBC) -TEST_F(AcmSenderBitExactnessOldApi, MAYBE_Ilbc_30ms) { +#if defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_LINUX) && \ + defined(WEBRTC_ARCH_X86_64) +TEST_F(AcmSenderBitExactnessOldApi, Ilbc_30ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("ILBC", 8000, 1, 102, 240, 240)); - Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( - "7b6ec10910debd9af08011d3ed5249f7", - "7b6ec10910debd9af08011d3ed5249f7", "android_arm32_audio", - "android_arm64_audio", "android_arm64_clang_audio", - "7b6ec10910debd9af08011d3ed5249f7"), - AcmReceiverBitExactnessOldApi::PlatformChecksum( - "cfae2e9f6aba96e145f2bcdd5050ce78", - "cfae2e9f6aba96e145f2bcdd5050ce78", "android_arm32_payload", - 
"android_arm64_payload", "android_arm64_clang_payload", - "cfae2e9f6aba96e145f2bcdd5050ce78"), - 33, test::AcmReceiveTestOldApi::kMonoOutput); + Run(/*audio_checksum_ref=*/"b14dba0de36efa5ec88a32c0b320b70f", + /*payload_checksum_ref=*/"cfae2e9f6aba96e145f2bcdd5050ce78", + /*expected_packets=*/33, + /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput); } #endif -#if defined(WEBRTC_ANDROID) -#define MAYBE_G722_20ms DISABLED_G722_20ms -#else -#define MAYBE_G722_20ms G722_20ms -#endif -TEST_F(AcmSenderBitExactnessOldApi, MAYBE_G722_20ms) { +#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) +TEST_F(AcmSenderBitExactnessOldApi, G722_20ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 1, 9, 320, 160)); - Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( - "e99c89be49a46325d03c0d990c292d68", - "e99c89be49a46325d03c0d990c292d68", "android_arm32_audio", - "android_arm64_audio", "android_arm64_clang_audio", - "e99c89be49a46325d03c0d990c292d68"), - AcmReceiverBitExactnessOldApi::PlatformChecksum( - "fc68a87e1380614e658087cb35d5ca10", - "fc68a87e1380614e658087cb35d5ca10", "android_arm32_payload", - "android_arm64_payload", "android_arm64_clang_payload", - "fc68a87e1380614e658087cb35d5ca10"), - 50, test::AcmReceiveTestOldApi::kMonoOutput); + Run(/*audio_checksum_ref=*/"a87a91ec0124510a64967f5d768554ff", + /*payload_checksum_ref=*/"fc68a87e1380614e658087cb35d5ca10", + /*expected_packets=*/50, + /*expected_channels=*/test::AcmReceiveTestOldApi::kMonoOutput); } - -#if defined(WEBRTC_ANDROID) -#define MAYBE_G722_stereo_20ms DISABLED_G722_stereo_20ms -#else -#define MAYBE_G722_stereo_20ms G722_stereo_20ms #endif -TEST_F(AcmSenderBitExactnessOldApi, MAYBE_G722_stereo_20ms) { + +#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) +TEST_F(AcmSenderBitExactnessOldApi, G722_stereo_20ms) { ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 2, 119, 320, 160)); - Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( - "e280aed283e499d37091b481ca094807", - "e280aed283e499d37091b481ca094807", "android_arm32_audio", - "android_arm64_audio", "android_arm64_clang_audio", - "e280aed283e499d37091b481ca094807"), - AcmReceiverBitExactnessOldApi::PlatformChecksum( - "66516152eeaa1e650ad94ff85f668dac", - "66516152eeaa1e650ad94ff85f668dac", "android_arm32_payload", - "android_arm64_payload", "android_arm64_clang_payload", - "66516152eeaa1e650ad94ff85f668dac"), - 50, test::AcmReceiveTestOldApi::kStereoOutput); + Run(/*audio_checksum_ref=*/"be0b8528ff9db3a2219f55ddd36faf7f", + /*payload_checksum_ref=*/"66516152eeaa1e650ad94ff85f668dac", + /*expected_packets=*/50, + /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput); } +#endif namespace { // Checksum depends on libopus being compiled with or without SSE. -const std::string audio_maybe_sse = - "e0ddf36854059151cdb7a0c4af3d282a" - "|32574e78db4eab0c467d3c0785e3b484"; -const std::string payload_maybe_sse = - "b43bdf7638b2bc2a5a6f30bdc640b9ed" - "|c30d463e7ed10bdd1da9045f80561f27"; -// Common checksums. 
 const std::string audio_checksum =
-    AcmReceiverBitExactnessOldApi::PlatformChecksum(
-        audio_maybe_sse,
-        audio_maybe_sse,
-        "6fcceb83acf427730570bc13eeac920c",
-        "fd96f15d547c4e155daeeef4253b174e",
-        "fd96f15d547c4e155daeeef4253b174e",
-        "Mac_arm64_checksum_placeholder");
+    "6a76fe2ffba057c06eb63239b3c47abe"
+    "|0c4f9d33b4a7379a34ee0c0d5718afe6";
 const std::string payload_checksum =
-    AcmReceiverBitExactnessOldApi::PlatformChecksum(
-        payload_maybe_sse,
-        payload_maybe_sse,
-        "4bd846d0aa5656ecd5dfd85701a1b78c",
-        "7efbfc9f8e3b4b2933ae2d01ab919028",
-        "7efbfc9f8e3b4b2933ae2d01ab919028",
-        "Mac_arm64_checksum_placeholder");
+    "b43bdf7638b2bc2a5a6f30bdc640b9ed"
+    "|c30d463e7ed10bdd1da9045f80561f27";
 }  // namespace

-// TODO(http://bugs.webrtc.org/12518): Enable the test after Opus has been
-// updated.
-TEST_F(AcmSenderBitExactnessOldApi, DISABLED_Opus_stereo_20ms) {
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessOldApi, Opus_stereo_20ms) {
   ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
-  Run(audio_checksum, payload_checksum, 50,
-      test::AcmReceiveTestOldApi::kStereoOutput);
+  Run(audio_checksum, payload_checksum, /*expected_packets=*/50,
+      /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
 }
+#endif

-// TODO(http://bugs.webrtc.org/12518): Enable the test after Opus has been
-// updated.
-TEST_F(AcmSenderBitExactnessNewApi, DISABLED_OpusFromFormat_stereo_20ms) {
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
+TEST_F(AcmSenderBitExactnessNewApi, OpusFromFormat_stereo_20ms) {
   const auto config = AudioEncoderOpus::SdpToConfig(
       SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}}));
   ASSERT_TRUE(SetUpSender(kTestFileFakeStereo32kHz, 32000));
   ASSERT_NO_FATAL_FAILURE(SetUpTestExternalEncoder(
       AudioEncoderOpus::MakeAudioEncoder(*config, 120), 120));
-  Run(audio_checksum, payload_checksum, 50,
-      test::AcmReceiveTestOldApi::kStereoOutput);
+  Run(audio_checksum, payload_checksum, /*expected_packets=*/50,
+      /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput);
 }
+#endif

 // TODO(webrtc:8649): Disabled until the Encoder counterpart of
 // https://webrtc-review.googlesource.com/c/src/+/129768 lands.
+#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64)
 TEST_F(AcmSenderBitExactnessNewApi, DISABLED_OpusManyChannels) {
   constexpr int kNumChannels = 4;
   constexpr int kOpusPayloadType = 120;
@@ -1574,27 +1414,19 @@ TEST_F(AcmSenderBitExactnessNewApi, DISABLED_OpusManyChannels) {
       AudioDecoderMultiChannelOpus::MakeAudioDecoder(*decoder_config);

   rtc::scoped_refptr<AudioDecoderFactory> decoder_factory =
-      new rtc::RefCountedObject<test::AudioDecoderProxyFactory>(
-          opus_decoder.get());
+      rtc::make_ref_counted<test::AudioDecoderProxyFactory>(opus_decoder.get());

   // Set up an EXTERNAL DECODER to parse 4 channels.
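The factory change just above recurs across this diff: `new rtc::RefCountedObject<T>(...)` becomes `rtc::make_ref_counted<T>(...)`. A sketch of the difference, with a hypothetical payload type (assuming make_ref_counted is available from rtc_base/ref_counted_object.h, which is where it lived at this point; it wraps T in the ref-counting shim and returns a scoped_refptr, so call sites no longer spell out RefCountedObject):

#include "api/scoped_refptr.h"
#include "rtc_base/ref_counted_object.h"

struct Payload {
  explicit Payload(int v) : value(v) {}
  int value;
};

void Demo() {
  // Old spelling: the ref-counting wrapper type appears at every call site.
  rtc::scoped_refptr<Payload> a(new rtc::RefCountedObject<Payload>(1));
  // New spelling: the factory deduces the wrapper and the return type.
  rtc::scoped_refptr<Payload> b = rtc::make_ref_counted<Payload>(2);
}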
- Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( // audio checksum - "audio checksum check downstream|8051617907766bec5f4e4a4f7c6d5291", - "8051617907766bec5f4e4a4f7c6d5291", - "6183752a62dc1368f959eb3a8c93b846", "android arm64 audio checksum", - "48bf1f3ca0b72f3c9cdfbe79956122b1", "Mac_arm64_checksum_placeholder"), - // payload_checksum, - AcmReceiverBitExactnessOldApi::PlatformChecksum( // payload checksum - "payload checksum check downstream|b09c52e44b2bdd9a0809e3a5b1623a76", - "b09c52e44b2bdd9a0809e3a5b1623a76", - "2ea535ef60f7d0c9d89e3002d4c2124f", "android arm64 payload checksum", - "e87995a80f50a0a735a230ca8b04a67d", "Mac_arm64_checksum_placeholder"), - 50, test::AcmReceiveTestOldApi::kQuadOutput, decoder_factory); + Run("audio checksum check downstream|8051617907766bec5f4e4a4f7c6d5291", + "payload checksum check downstream|b09c52e44b2bdd9a0809e3a5b1623a76", + /*expected_packets=*/50, + /*expected_channels=*/test::AcmReceiveTestOldApi::kQuadOutput, + decoder_factory); } +#endif -// TODO(http://bugs.webrtc.org/12518): Enable the test after Opus has been -// updated. -TEST_F(AcmSenderBitExactnessNewApi, DISABLED_OpusFromFormat_stereo_20ms_voip) { +#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) +TEST_F(AcmSenderBitExactnessNewApi, OpusFromFormat_stereo_20ms_voip) { auto config = AudioEncoderOpus::SdpToConfig( SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}})); // If not set, default will be kAudio in case of stereo. @@ -1603,22 +1435,15 @@ TEST_F(AcmSenderBitExactnessNewApi, DISABLED_OpusFromFormat_stereo_20ms_voip) { ASSERT_NO_FATAL_FAILURE(SetUpTestExternalEncoder( AudioEncoderOpus::MakeAudioEncoder(*config, 120), 120)); const std::string audio_maybe_sse = - "2d7e5797444f75e5bfeaffbd8c25176b" - "|408d4bdc05a8c23e46c6ac06c5b917ee"; + "058c03ca2c9bb5c0066d4c15ce50d772" + "|ca54661b220cc35239c6864ab858d29a"; const std::string payload_maybe_sse = - "b38b5584cfa7b6999b2e8e996c950c88" + "f270ec7be7a5ed60c203c2317c4e1011" "|eb0752ce1b6f2436fefc2e19bd084fb5"; - Run(AcmReceiverBitExactnessOldApi::PlatformChecksum( - audio_maybe_sse, audio_maybe_sse, "f1cefe107ffdced7694d7f735342adf3", - "3b1bfe5dd8ed16ee5b04b93a5b5e7e48", - "3b1bfe5dd8ed16ee5b04b93a5b5e7e48", "Mac_arm64_checksum_placeholder"), - AcmReceiverBitExactnessOldApi::PlatformChecksum( - payload_maybe_sse, payload_maybe_sse, - "5e79a2f51c633fe145b6c10ae198d1aa", - "e730050cb304d54d853fd285ab0424fa", - "e730050cb304d54d853fd285ab0424fa", "Mac_arm64_checksum_placeholder"), - 50, test::AcmReceiveTestOldApi::kStereoOutput); + Run(audio_maybe_sse, payload_maybe_sse, /*expected_packets=*/50, + /*expected_channels=*/test::AcmReceiveTestOldApi::kStereoOutput); } +#endif // This test is for verifying the SetBitRate function. The bitrate is changed at // the beginning, and the number of generated bytes are checked. @@ -1631,8 +1456,8 @@ class AcmSetBitRateTest : public ::testing::Test { bool SetUpSender() { const std::string input_file_name = webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"); - // Note that |audio_source_| will loop forever. The test duration is set - // explicitly by |kTestDurationMs|. + // Note that `audio_source_` will loop forever. The test duration is set + // explicitly by `kTestDurationMs`. 
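For context on what all these Run() calls compare: each test drives encoding (and decoding) over a fixed duration, then matches a digest of the output audio and of the payload bytes against the reference strings, which is why the references fork on CPU features such as AVX2 and, before this change, on platform. A rough sketch of forming such a digest over decoded PCM, assuming rtc::Md5Digest and rtc::hex_encode from rtc_base (the helper name is hypothetical; the real tests route frames through shared test utilities):

#include <cstdint>
#include <string>

#include "rtc_base/message_digest.h"
#include "rtc_base/string_encode.h"

std::string PcmChecksum(const int16_t* samples, size_t num_samples) {
  rtc::Md5Digest md5;
  // Feed the raw little-endian sample bytes into the digest.
  md5.Update(samples, num_samples * sizeof(int16_t));
  char digest[rtc::Md5Digest::kSize];
  md5.Finish(digest, sizeof(digest));
  return rtc::hex_encode(digest, sizeof(digest));
}

A one-sample difference anywhere in the decoded stream changes the hex string, which is what makes these tests "bit exact" and also why every codec or optimization change shows up here as a wall of updated checksums.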
audio_source_.reset(new test::InputAudioFile(input_file_name)); static const int kSourceRateHz = 32000; send_test_.reset(new test::AcmSendTestOldApi( @@ -1853,13 +1678,13 @@ TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) { ASSERT_TRUE(SetUpSender(kTestFileMono32kHz, 32000)); ASSERT_NO_FATAL_FAILURE( SetUpTestExternalEncoder(std::move(mock_encoder), config.payload_type)); - Run("81a9d4c0bb72e9becc43aef124c981e9", "8f9b8750bd80fe26b6cbf6659b89f0f9", + Run("c8d1fc677f33c2022ec5f83c7f302280", "8f9b8750bd80fe26b6cbf6659b89f0f9", 50, test::AcmReceiveTestOldApi::kMonoOutput); } // This test fixture is implemented to run ACM and change the desired output // frequency during the call. The input packets are simply PCM16b-wb encoded -// payloads with a constant value of |kSampleValue|. The test fixture itself +// payloads with a constant value of `kSampleValue`. The test fixture itself // acts as PacketSource in between the receive test class and the constant- // payload packet source class. The output is both written to file, and analyzed // in this test fixture. diff --git a/modules/audio_coding/acm2/call_statistics.cc b/modules/audio_coding/acm2/call_statistics.cc index e97e529688..9f3bdadc88 100644 --- a/modules/audio_coding/acm2/call_statistics.cc +++ b/modules/audio_coding/acm2/call_statistics.cc @@ -44,8 +44,8 @@ void CallStatistics::DecodedByNetEq(AudioFrame::SpeechType speech_type, break; } case AudioFrame::kUndefined: { - // If the audio is decoded by NetEq, |kUndefined| is not an option. - RTC_NOTREACHED(); + // If the audio is decoded by NetEq, `kUndefined` is not an option. + RTC_DCHECK_NOTREACHED(); } } } diff --git a/modules/audio_coding/acm2/call_statistics.h b/modules/audio_coding/acm2/call_statistics.h index 5d94ac4538..a2db2a29f4 100644 --- a/modules/audio_coding/acm2/call_statistics.h +++ b/modules/audio_coding/acm2/call_statistics.h @@ -36,8 +36,8 @@ class CallStatistics { CallStatistics() {} ~CallStatistics() {} - // Call this method to indicate that NetEq engaged in decoding. |speech_type| - // is the audio-type according to NetEq, and |muted| indicates if the decoded + // Call this method to indicate that NetEq engaged in decoding. `speech_type` + // is the audio-type according to NetEq, and `muted` indicates if the decoded // frame was produced in muted state. 
   void DecodedByNetEq(AudioFrame::SpeechType speech_type, bool muted);
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
index 1c91fa19a8..664e76bda5 100644
--- a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
@@ -21,7 +21,6 @@
 #include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
 #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
 #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
-#include "rtc_base/constructor_magic.h"

 namespace webrtc {

@@ -44,6 +43,9 @@ class AudioNetworkAdaptorImpl final : public AudioNetworkAdaptor {

   ~AudioNetworkAdaptorImpl() override;

+  AudioNetworkAdaptorImpl(const AudioNetworkAdaptorImpl&) = delete;
+  AudioNetworkAdaptorImpl& operator=(const AudioNetworkAdaptorImpl&) = delete;
+
   void SetUplinkBandwidth(int uplink_bandwidth_bps) override;

   void SetUplinkPacketLossFraction(float uplink_packet_loss_fraction) override;
@@ -80,8 +82,6 @@ class AudioNetworkAdaptorImpl final : public AudioNetworkAdaptor {
   absl::optional<AudioEncoderRuntimeConfig> prev_config_;

   ANAStats stats_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(AudioNetworkAdaptorImpl);
 };

 }  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc b/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc
index 40c8659069..88ca38d074 100644
--- a/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc
+++ b/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc
@@ -50,7 +50,7 @@ void BitrateController::UpdateNetworkMetrics(
 }

 void BitrateController::MakeDecision(AudioEncoderRuntimeConfig* config) {
-  // Decision on |bitrate_bps| should not have been made.
+  // Decision on `bitrate_bps` should not have been made.
RTC_DCHECK(!config->bitrate_bps); if (target_audio_bitrate_bps_ && overhead_bytes_per_packet_) { if (config->frame_length_ms) diff --git a/modules/audio_coding/audio_network_adaptor/bitrate_controller.h b/modules/audio_coding/audio_network_adaptor/bitrate_controller.h index 41bfbd1c32..c1032146cc 100644 --- a/modules/audio_coding/audio_network_adaptor/bitrate_controller.h +++ b/modules/audio_coding/audio_network_adaptor/bitrate_controller.h @@ -16,7 +16,6 @@ #include "absl/types/optional.h" #include "modules/audio_coding/audio_network_adaptor/controller.h" #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace audio_network_adaptor { @@ -39,6 +38,9 @@ class BitrateController final : public Controller { ~BitrateController() override; + BitrateController(const BitrateController&) = delete; + BitrateController& operator=(const BitrateController&) = delete; + void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override; void MakeDecision(AudioEncoderRuntimeConfig* config) override; @@ -49,7 +51,6 @@ class BitrateController final : public Controller { int frame_length_ms_; absl::optional target_audio_bitrate_bps_; absl::optional overhead_bytes_per_packet_; - RTC_DISALLOW_COPY_AND_ASSIGN(BitrateController); }; } // namespace audio_network_adaptor diff --git a/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc b/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc index 76f52ade80..3155f198a4 100644 --- a/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc +++ b/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc @@ -85,7 +85,7 @@ TEST(AnaBitrateControllerTest, ChangeBitrateOnTargetBitrateChanged) { 1000 / kInitialFrameLengthMs; // Frame length unchanged, bitrate changes in accordance with - // |metrics.target_audio_bitrate_bps| and |metrics.overhead_bytes_per_packet|. + // `metrics.target_audio_bitrate_bps` and `metrics.overhead_bytes_per_packet`. UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket); CheckDecision(&controller, kInitialFrameLengthMs, kBitrateBps); } diff --git a/modules/audio_coding/audio_network_adaptor/channel_controller.cc b/modules/audio_coding/audio_network_adaptor/channel_controller.cc index 2f5af6733e..2ef2f4c4d8 100644 --- a/modules/audio_coding/audio_network_adaptor/channel_controller.cc +++ b/modules/audio_coding/audio_network_adaptor/channel_controller.cc @@ -28,7 +28,7 @@ ChannelController::Config::Config(size_t num_encoder_channels, ChannelController::ChannelController(const Config& config) : config_(config), channels_to_encode_(config_.intial_channels_to_encode) { RTC_DCHECK_GT(config_.intial_channels_to_encode, 0lu); - // Currently, we require |intial_channels_to_encode| to be <= 2. + // Currently, we require `intial_channels_to_encode` to be <= 2. RTC_DCHECK_LE(config_.intial_channels_to_encode, 2lu); RTC_DCHECK_GE(config_.num_encoder_channels, config_.intial_channels_to_encode); @@ -43,7 +43,7 @@ void ChannelController::UpdateNetworkMetrics( } void ChannelController::MakeDecision(AudioEncoderRuntimeConfig* config) { - // Decision on |num_channels| should not have been made. + // Decision on `num_channels` should not have been made. 
RTC_DCHECK(!config->num_channels); if (uplink_bandwidth_bps_) { diff --git a/modules/audio_coding/audio_network_adaptor/channel_controller.h b/modules/audio_coding/audio_network_adaptor/channel_controller.h index f211f40f17..3cd4bb7dec 100644 --- a/modules/audio_coding/audio_network_adaptor/channel_controller.h +++ b/modules/audio_coding/audio_network_adaptor/channel_controller.h @@ -16,7 +16,6 @@ #include "absl/types/optional.h" #include "modules/audio_coding/audio_network_adaptor/controller.h" #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -41,6 +40,9 @@ class ChannelController final : public Controller { ~ChannelController() override; + ChannelController(const ChannelController&) = delete; + ChannelController& operator=(const ChannelController&) = delete; + void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override; void MakeDecision(AudioEncoderRuntimeConfig* config) override; @@ -49,7 +51,6 @@ class ChannelController final : public Controller { const Config config_; size_t channels_to_encode_; absl::optional uplink_bandwidth_bps_; - RTC_DISALLOW_COPY_AND_ASSIGN(ChannelController); }; } // namespace webrtc diff --git a/modules/audio_coding/audio_network_adaptor/config.proto b/modules/audio_coding/audio_network_adaptor/config.proto index 347372e8d9..a815451993 100644 --- a/modules/audio_coding/audio_network_adaptor/config.proto +++ b/modules/audio_coding/audio_network_adaptor/config.proto @@ -5,6 +5,7 @@ package webrtc.audio_network_adaptor.config; option optimize_for = LITE_RUNTIME; option java_package = "org.webrtc.AudioNetworkAdaptor"; option java_outer_classname = "Config"; +option objc_class_prefix = "WANA"; message FecController { message Threshold { @@ -23,8 +24,8 @@ message FecController { optional float high_bandwidth_packet_loss = 4; } - // |fec_enabling_threshold| defines a curve, above which FEC should be - // enabled. |fec_disabling_threshold| defines a curve, under which FEC + // `fec_enabling_threshold` defines a curve, above which FEC should be + // enabled. `fec_disabling_threshold` defines a curve, under which FEC // should be disabled. See below // // packet-loss ^ | | @@ -36,7 +37,7 @@ message FecController { optional Threshold fec_enabling_threshold = 1; optional Threshold fec_disabling_threshold = 2; - // |time_constant_ms| is the time constant for an exponential filter, which + // `time_constant_ms` is the time constant for an exponential filter, which // is used for smoothing the packet loss fraction. optional int32 time_constant_ms = 3; } @@ -62,8 +63,8 @@ message FecControllerRplrBased { optional float high_bandwidth_recoverable_packet_loss = 4; } - // |fec_enabling_threshold| defines a curve, above which FEC should be - // enabled. |fec_disabling_threshold| defines a curve, under which FEC + // `fec_enabling_threshold` defines a curve, above which FEC should be + // enabled. `fec_disabling_threshold` defines a curve, under which FEC // should be disabled. See below // // packet-loss ^ | | @@ -122,7 +123,7 @@ message FrameLengthControllerV2 { // FrameLengthControllerV2 chooses the frame length by taking the target // bitrate and subtracting the overhead bitrate to obtain the remaining // bitrate for the payload. The chosen frame length is the shortest possible - // where the payload bitrate is more than |min_payload_bitrate_bps|. + // where the payload bitrate is more than `min_payload_bitrate_bps`. 
   optional int32 min_payload_bitrate_bps = 1;

   // If true, uses the stable target bitrate to decide the frame length. This
@@ -158,18 +159,18 @@ message BitrateController {

 message Controller {
   message ScoringPoint {
-    // |ScoringPoint| is a subspace of network condition. It is used for
+    // `ScoringPoint` is a subspace of network condition. It is used for
     // comparing the significance of controllers.
     optional int32 uplink_bandwidth_bps = 1;
     optional float uplink_packet_loss_fraction = 2;
   }

-  // The distance from |scoring_point| to a given network condition defines
+  // The distance from `scoring_point` to a given network condition defines
   // the significance of this controller with respect that network condition.
   // Shorter distance means higher significance. The significances of
   // controllers determine their order in the processing pipeline. Controllers
-  // without |scoring_point| follow their default order in
-  // |ControllerManager::controllers|.
+  // without `scoring_point` follow their default order in
+  // `ControllerManager::controllers`.
   optional ScoringPoint scoring_point = 1;

   oneof controller {
diff --git a/modules/audio_coding/audio_network_adaptor/controller_manager.cc b/modules/audio_coding/audio_network_adaptor/controller_manager.cc
index 415b9fcf52..87759c37ea 100644
--- a/modules/audio_coding/audio_network_adaptor/controller_manager.cc
+++ b/modules/audio_coding/audio_network_adaptor/controller_manager.cc
@@ -265,7 +265,7 @@ std::unique_ptr<ControllerManager> ControllerManagerImpl::Create(
       break;
     case audio_network_adaptor::config::Controller::kFecControllerRplrBased:
       // FecControllerRplrBased has been removed and can't be used anymore.
-      RTC_NOTREACHED();
+      RTC_DCHECK_NOTREACHED();
       continue;
     case audio_network_adaptor::config::Controller::kFrameLengthController:
       controller = CreateFrameLengthController(
@@ -293,7 +293,7 @@ std::unique_ptr<ControllerManager> ControllerManagerImpl::Create(
           encoder_frame_lengths_ms);
       break;
     default:
-      RTC_NOTREACHED();
+      RTC_DCHECK_NOTREACHED();
   }
   if (controller_config.has_scoring_point()) {
     auto& scoring_point = controller_config.scoring_point();
@@ -321,7 +321,7 @@ std::unique_ptr<ControllerManager> ControllerManagerImpl::Create(
   }

 #else
-  RTC_NOTREACHED();
+  RTC_DCHECK_NOTREACHED();
   return nullptr;
 #endif  // WEBRTC_ENABLE_PROTOBUF
 }
@@ -373,14 +373,14 @@ std::vector<Controller*> ControllerManagerImpl::GetSortedControllers(
       config_.min_reordering_squared_distance)
     return sorted_controllers_;

-  // Sort controllers according to the distances of |scoring_point| to the
+  // Sort controllers according to the distances of `scoring_point` to the
   // scoring points of controllers.
   //
   // A controller that does not associate with any scoring point
   // are treated as if
   // 1) they are less important than any controller that has a scoring point,
   // 2) they are equally important to any controller that has no scoring point,
-  //    and their relative order will follow |default_sorted_controllers_|.
+  //    and their relative order will follow `default_sorted_controllers_`.
   std::vector<Controller*> sorted_controllers(default_sorted_controllers_);
   std::stable_sort(
       sorted_controllers.begin(), sorted_controllers.end(),
@@ -430,7 +430,7 @@ float NormalizeUplinkBandwidth(int uplink_bandwidth_bps) {
 }

 float NormalizePacketLossFraction(float uplink_packet_loss_fraction) {
-  // |uplink_packet_loss_fraction| is seldom larger than 0.3, so we scale it up
+  // `uplink_packet_loss_fraction` is seldom larger than 0.3, so we scale it up
   // by 3.3333f.
   return std::min(uplink_packet_loss_fraction * 3.3333f, 1.0f);
 }
diff --git a/modules/audio_coding/audio_network_adaptor/controller_manager.h b/modules/audio_coding/audio_network_adaptor/controller_manager.h
index f46450df95..f7d7b34fb1 100644
--- a/modules/audio_coding/audio_network_adaptor/controller_manager.h
+++ b/modules/audio_coding/audio_network_adaptor/controller_manager.h
@@ -17,7 +17,6 @@
 #include

 #include "modules/audio_coding/audio_network_adaptor/controller.h"
-#include "rtc_base/constructor_magic.h"

 namespace webrtc {

@@ -80,6 +79,9 @@ class ControllerManagerImpl final : public ControllerManager {

   ~ControllerManagerImpl() override;

+  ControllerManagerImpl(const ControllerManagerImpl&) = delete;
+  ControllerManagerImpl& operator=(const ControllerManagerImpl&) = delete;
+
   // Sort controllers based on their significance.
   std::vector<Controller*> GetSortedControllers(
       const Controller::NetworkMetrics& metrics) override;
@@ -111,11 +113,9 @@ class ControllerManagerImpl final : public ControllerManager {

   std::vector<Controller*> sorted_controllers_;

-  // |scoring_points_| saves the scoring points of various
+  // `scoring_points_` saves the scoring points of various
   // controllers.
   std::map<const Controller*, ScoringPoint> controller_scoring_points_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(ControllerManagerImpl);
 };

 }  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc b/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc
index c71bbc9e2a..7b7ced97c5 100644
--- a/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc
+++ b/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc
@@ -43,7 +43,7 @@ constexpr int kMinReorderingTimeMs = 200;
 constexpr int kFactor = 100;
 constexpr float kMinReorderingSquareDistance = 1.0f / kFactor / kFactor;

-// |kMinUplinkBandwidthBps| and |kMaxUplinkBandwidthBps| are copied from
+// `kMinUplinkBandwidthBps` and `kMaxUplinkBandwidthBps` are copied from
 // controller_manager.cc
 constexpr int kMinUplinkBandwidthBps = 0;
 constexpr int kMaxUplinkBandwidthBps = 120000;
@@ -82,7 +82,7 @@ ControllerManagerStates CreateControllerManager() {
   return states;
 }

-// |expected_order| contains the expected indices of all controllers in the
+// `expected_order` contains the expected indices of all controllers in the
 // vector of controllers returned by GetSortedControllers(). A negative index
 // means that we do not care about its exact place, but we do check that it
 // exists in the vector.
@@ -112,8 +112,8 @@ void CheckControllersOrder(
 TEST(ControllerManagerTest, GetControllersReturnAllControllers) {
   auto states = CreateControllerManager();
   auto check = states.controller_manager->GetControllers();
-  // Verify that controllers in |check| are one-to-one mapped to those in
-  // |mock_controllers_|.
+  // Verify that controllers in `check` are one-to-one mapped to those in
+  // `mock_controllers_`.
   EXPECT_EQ(states.mock_controllers.size(), check.size());
   for (auto& controller : check)
     EXPECT_NE(states.mock_controllers.end(),
@@ -123,7 +123,7 @@ TEST(ControllerManagerTest, GetControllersReturnAllControllers) {

 TEST(ControllerManagerTest, ControllersInDefaultOrderOnEmptyNetworkMetrics) {
   auto states = CreateControllerManager();
-  // |network_metrics| are empty, and the controllers are supposed to follow the
+  // `network_metrics` are empty, and the controllers are supposed to follow the
   // default order.
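To make the ordering in GetSortedControllers() concrete: both the current network state and each controller's ScoringPoint are normalized per axis, and controllers sort by squared distance, smaller meaning more significant. A sketch assuming simple clamping (the 3.3333f loss scaling and the 120000 bps bandwidth bound are the constants quoted in the hunks above; the exact clamping behavior is an assumption):

#include <algorithm>

float NormalizedBandwidth(int uplink_bandwidth_bps) {
  // Clamp to [0, 120000] bps, then scale to [0, 1].
  return std::min(std::max(uplink_bandwidth_bps, 0), 120000) / 120000.0f;
}

float NormalizedLoss(float uplink_packet_loss_fraction) {
  // Loss fractions rarely exceed 0.3, so 3.3333f stretches them toward [0, 1].
  return std::min(uplink_packet_loss_fraction * 3.3333f, 1.0f);
}

float SquaredDistance(int bw_bps, float loss,
                      int point_bw_bps, float point_loss) {
  float dx = NormalizedBandwidth(bw_bps) - NormalizedBandwidth(point_bw_bps);
  float dy = NormalizedLoss(loss) - NormalizedLoss(point_loss);
  return dx * dx + dy * dy;  // Smaller distance => higher significance.
}

The kMinReorderingSquareDistance constant in the unit test above exists because resorting only happens once the network state has moved far enough from the last sorting point.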
CheckControllersOrder(&states, absl::nullopt, absl::nullopt, {0, 1, 2, 3}); } @@ -304,7 +304,7 @@ void CheckControllersOrder(const std::vector& controllers, for (size_t i = 0; i < controllers.size(); ++i) { AudioEncoderRuntimeConfig encoder_config; - // We check the order of |controllers| by judging their decisions. + // We check the order of `controllers` by judging their decisions. controllers[i]->MakeDecision(&encoder_config); // Since controllers are not provided with network metrics, they give the diff --git a/modules/audio_coding/audio_network_adaptor/debug_dump.proto b/modules/audio_coding/audio_network_adaptor/debug_dump.proto index 93b31c3fe8..3aa6a504f3 100644 --- a/modules/audio_coding/audio_network_adaptor/debug_dump.proto +++ b/modules/audio_coding/audio_network_adaptor/debug_dump.proto @@ -21,7 +21,7 @@ message EncoderRuntimeConfig { optional bool enable_fec = 4; optional bool enable_dtx = 5; // Some encoders can encode fewer channels than the actual input to make - // better use of the bandwidth. |num_channels| sets the number of channels + // better use of the bandwidth. `num_channels` sets the number of channels // to encode. optional uint32 num_channels = 6; } diff --git a/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc b/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc index 669cf5ee53..2616706ee5 100644 --- a/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc +++ b/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc @@ -76,7 +76,7 @@ DebugDumpWriterImpl::DebugDumpWriterImpl(FILE* file_handle) { dump_file_ = FileWrapper(file_handle); RTC_CHECK(dump_file_.is_open()); #else - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); #endif } diff --git a/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h b/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h index 367f659542..8fdf2f7728 100644 --- a/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h +++ b/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h @@ -15,7 +15,6 @@ #include "modules/audio_coding/audio_network_adaptor/controller.h" #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/ignore_wundef.h" #include "rtc_base/system/file_wrapper.h" #if WEBRTC_ENABLE_PROTOBUF diff --git a/modules/audio_coding/audio_network_adaptor/dtx_controller.cc b/modules/audio_coding/audio_network_adaptor/dtx_controller.cc index 48384c9fb5..b0a7d5d59d 100644 --- a/modules/audio_coding/audio_network_adaptor/dtx_controller.cc +++ b/modules/audio_coding/audio_network_adaptor/dtx_controller.cc @@ -33,7 +33,7 @@ void DtxController::UpdateNetworkMetrics( } void DtxController::MakeDecision(AudioEncoderRuntimeConfig* config) { - // Decision on |enable_dtx| should not have been made. + // Decision on `enable_dtx` should not have been made. 
RTC_DCHECK(!config->enable_dtx); if (uplink_bandwidth_bps_) { diff --git a/modules/audio_coding/audio_network_adaptor/dtx_controller.h b/modules/audio_coding/audio_network_adaptor/dtx_controller.h index 83fdf3ddd7..b8a8e476e4 100644 --- a/modules/audio_coding/audio_network_adaptor/dtx_controller.h +++ b/modules/audio_coding/audio_network_adaptor/dtx_controller.h @@ -14,7 +14,6 @@ #include "absl/types/optional.h" #include "modules/audio_coding/audio_network_adaptor/controller.h" #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -35,6 +34,9 @@ class DtxController final : public Controller { ~DtxController() override; + DtxController(const DtxController&) = delete; + DtxController& operator=(const DtxController&) = delete; + void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override; void MakeDecision(AudioEncoderRuntimeConfig* config) override; @@ -43,7 +45,6 @@ class DtxController final : public Controller { const Config config_; bool dtx_enabled_; absl::optional uplink_bandwidth_bps_; - RTC_DISALLOW_COPY_AND_ASSIGN(DtxController); }; } // namespace webrtc diff --git a/modules/audio_coding/audio_network_adaptor/event_log_writer.h b/modules/audio_coding/audio_network_adaptor/event_log_writer.h index c5e57e63e6..a147311fc7 100644 --- a/modules/audio_coding/audio_network_adaptor/event_log_writer.h +++ b/modules/audio_coding/audio_network_adaptor/event_log_writer.h @@ -12,7 +12,6 @@ #define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_EVENT_LOG_WRITER_H_ #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { class RtcEventLog; @@ -24,6 +23,10 @@ class EventLogWriter final { float min_bitrate_change_fraction, float min_packet_loss_change_fraction); ~EventLogWriter(); + + EventLogWriter(const EventLogWriter&) = delete; + EventLogWriter& operator=(const EventLogWriter&) = delete; + void MaybeLogEncoderConfig(const AudioEncoderRuntimeConfig& config); private: @@ -34,7 +37,6 @@ class EventLogWriter final { const float min_bitrate_change_fraction_; const float min_packet_loss_change_fraction_; AudioEncoderRuntimeConfig last_logged_config_; - RTC_DISALLOW_COPY_AND_ASSIGN(EventLogWriter); }; } // namespace webrtc diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc index 936e22429a..c5e5fa76e3 100644 --- a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc +++ b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc @@ -26,7 +26,7 @@ class NullSmoothingFilter final : public SmoothingFilter { absl::optional GetAverage() override { return last_sample_; } bool SetTimeConstantMs(int time_constant_ms) override { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return false; } diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h index 87afe2ebc1..0c57ad1d1e 100644 --- a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h +++ b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h @@ -18,15 +18,14 @@ #include "modules/audio_coding/audio_network_adaptor/controller.h" #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h" #include 
"modules/audio_coding/audio_network_adaptor/util/threshold_curve.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { class FecControllerPlrBased final : public Controller { public: struct Config { - // |fec_enabling_threshold| defines a curve, above which FEC should be - // enabled. |fec_disabling_threshold| defines a curve, under which FEC + // `fec_enabling_threshold` defines a curve, above which FEC should be + // enabled. `fec_disabling_threshold` defines a curve, under which FEC // should be disabled. See below // // packet-loss ^ | | @@ -53,6 +52,9 @@ class FecControllerPlrBased final : public Controller { ~FecControllerPlrBased() override; + FecControllerPlrBased(const FecControllerPlrBased&) = delete; + FecControllerPlrBased& operator=(const FecControllerPlrBased&) = delete; + void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override; void MakeDecision(AudioEncoderRuntimeConfig* config) override; @@ -65,8 +67,6 @@ class FecControllerPlrBased final : public Controller { bool fec_enabled_; absl::optional uplink_bandwidth_bps_; const std::unique_ptr packet_loss_smoother_; - - RTC_DISALLOW_COPY_AND_ASSIGN(FecControllerPlrBased); }; } // namespace webrtc diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc index d95cbceba9..743b087163 100644 --- a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc +++ b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc @@ -100,9 +100,9 @@ void UpdateNetworkMetrics(FecControllerPlrBasedTestStates* states, } } -// Checks that the FEC decision and |uplink_packet_loss_fraction| given by -// |states->controller->MakeDecision| matches |expected_enable_fec| and -// |expected_uplink_packet_loss_fraction|, respectively. +// Checks that the FEC decision and `uplink_packet_loss_fraction` given by +// `states->controller->MakeDecision` matches `expected_enable_fec` and +// `expected_uplink_packet_loss_fraction`, respectively. void CheckDecision(FecControllerPlrBasedTestStates* states, bool expected_enable_fec, float expected_uplink_packet_loss_fraction) { @@ -221,7 +221,7 @@ TEST(FecControllerPlrBasedTest, MaintainFecOffForLowBandwidth) { TEST(FecControllerPlrBasedTest, MaintainFecOffForVeryLowBandwidth) { auto states = CreateFecControllerPlrBased(false); - // Below |kEnablingBandwidthLow|, no packet loss fraction can cause FEC to + // Below `kEnablingBandwidthLow`, no packet loss fraction can cause FEC to // turn on. UpdateNetworkMetrics(&states, kEnablingBandwidthLow - 1, 1.0); CheckDecision(&states, false, 1.0); @@ -272,7 +272,7 @@ TEST(FecControllerPlrBasedTest, DisableFecForLowBandwidth) { TEST(FecControllerPlrBasedTest, DisableFecForVeryLowBandwidth) { auto states = CreateFecControllerPlrBased(true); - // Below |kEnablingBandwidthLow|, any packet loss fraction can cause FEC to + // Below `kEnablingBandwidthLow`, any packet loss fraction can cause FEC to // turn off. 
UpdateNetworkMetrics(&states, kDisablingBandwidthLow - 1, 1.0); CheckDecision(&states, false, 1.0); diff --git a/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc b/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc index 36e9eb9d9e..c47434f9aa 100644 --- a/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc +++ b/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc @@ -54,7 +54,7 @@ FrameLengthController::FrameLengthController(const Config& config) frame_length_ms_ = std::find(config_.encoder_frame_lengths_ms.begin(), config_.encoder_frame_lengths_ms.end(), config_.initial_frame_length_ms); - // |encoder_frame_lengths_ms| must contain |initial_frame_length_ms|. + // `encoder_frame_lengths_ms` must contain `initial_frame_length_ms`. RTC_DCHECK(frame_length_ms_ != config_.encoder_frame_lengths_ms.end()); } @@ -71,7 +71,7 @@ void FrameLengthController::UpdateNetworkMetrics( } void FrameLengthController::MakeDecision(AudioEncoderRuntimeConfig* config) { - // Decision on |frame_length_ms| should not have been made. + // Decision on `frame_length_ms` should not have been made. RTC_DCHECK(!config->frame_length_ms); if (FrameLengthIncreasingDecision(*config)) { @@ -99,12 +99,12 @@ bool FrameLengthController::Config::FrameLengthChange::operator<( bool FrameLengthController::FrameLengthIncreasingDecision( const AudioEncoderRuntimeConfig& config) { // Increase frame length if - // 1. |uplink_bandwidth_bps| is known to be smaller or equal than - // |min_encoder_bitrate_bps| plus |prevent_overuse_margin_bps| plus the + // 1. `uplink_bandwidth_bps` is known to be smaller than or equal to + // `min_encoder_bitrate_bps` plus `prevent_overuse_margin_bps` plus the // current overhead rate OR all the following: // 2. longer frame length is available AND - // 3. |uplink_bandwidth_bps| is known to be smaller than a threshold AND - // 4. |uplink_packet_loss_fraction| is known to be smaller than a threshold. + // 3. `uplink_bandwidth_bps` is known to be smaller than a threshold AND + // 4. `uplink_packet_loss_fraction` is known to be smaller than a threshold. // Find next frame length to which a criterion is defined to shift from // current frame length. @@ -156,12 +156,12 @@ bool FrameLengthController::FrameLengthDecreasingDecision( const AudioEncoderRuntimeConfig& config) { // Decrease frame length if // 1. shorter frame length is available AND - // 2. |uplink_bandwidth_bps| is known to be bigger than - // |min_encoder_bitrate_bps| plus |prevent_overuse_margin_bps| plus the + // 2. `uplink_bandwidth_bps` is known to be bigger than + // `min_encoder_bitrate_bps` plus `prevent_overuse_margin_bps` plus the // overhead which would be produced with the shorter frame length AND // one or more of the followings: - // 3. |uplink_bandwidth_bps| is known to be larger than a threshold, - // 4. |uplink_packet_loss_fraction| is known to be larger than a threshold, + // 3. `uplink_bandwidth_bps` is known to be larger than a threshold, + // 4. `uplink_packet_loss_fraction` is known to be larger than a threshold, // Find next frame length to which a criterion is defined to shift from // current frame length.
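The frame-length decision criteria spelled out in the comments above reduce to a pair of threshold checks plus an overhead guard. A minimal sketch of the increase rule (criteria 2-4), with hypothetical threshold parameters standing in for the controller's real Config fields, and std::optional standing in for absl::optional:

```cpp
// Sketch of the frame-length increase rule described in the comments above.
// Threshold names are illustrative; the real controller reads them from its
// Config and additionally applies the overhead-based criterion 1.
#include <optional>

bool ShouldIncreaseFrameLength(std::optional<int> uplink_bandwidth_bps,
                               std::optional<float> uplink_packet_loss_fraction,
                               bool longer_frame_length_available,
                               int bandwidth_threshold_bps,
                               float packet_loss_threshold) {
  // Criterion 2: a longer frame length must be available to switch to.
  if (!longer_frame_length_available)
    return false;
  // Criteria 3 and 4: both metrics must be known and below their thresholds.
  return uplink_bandwidth_bps.has_value() &&
         *uplink_bandwidth_bps < bandwidth_threshold_bps &&
         uplink_packet_loss_fraction.has_value() &&
         *uplink_packet_loss_fraction < packet_loss_threshold;
}
```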
diff --git a/modules/audio_coding/audio_network_adaptor/frame_length_controller.h b/modules/audio_coding/audio_network_adaptor/frame_length_controller.h index 74a787e1c1..04693f8db7 100644 --- a/modules/audio_coding/audio_network_adaptor/frame_length_controller.h +++ b/modules/audio_coding/audio_network_adaptor/frame_length_controller.h @@ -19,7 +19,6 @@ #include "absl/types/optional.h" #include "modules/audio_coding/audio_network_adaptor/controller.h" #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -62,6 +61,9 @@ class FrameLengthController final : public Controller { ~FrameLengthController() override; + FrameLengthController(const FrameLengthController&) = delete; + FrameLengthController& operator=(const FrameLengthController&) = delete; + void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override; void MakeDecision(AudioEncoderRuntimeConfig* config) override; @@ -84,8 +86,6 @@ class FrameLengthController final : public Controller { // True if the previous frame length decision was an increase, otherwise // false. bool prev_decision_increase_ = false; - - RTC_DISALLOW_COPY_AND_ASSIGN(FrameLengthController); }; } // namespace webrtc diff --git a/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc b/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc index 0ffa54a1ed..23123934dc 100644 --- a/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc +++ b/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc @@ -184,8 +184,8 @@ TEST(FrameLengthControllerTest, TEST(FrameLengthControllerTest, IncreaseTo40MsOnMultipleConditions) { // Increase to 40ms frame length if - // 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND - // 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold + // 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND + // 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold // AND // 3. FEC is not decided or OFF. auto controller = CreateController(CreateChangeCriteriaFor20msAnd40ms(), @@ -206,8 +206,8 @@ TEST(FrameLengthControllerTest, DecreaseTo40MsOnHighUplinkBandwidth) { TEST(FrameLengthControllerTest, Maintain60MsOnMultipleConditions) { // Maintain 60ms frame length if - // 1. |uplink_bandwidth_bps| is at medium level, - // 2. |uplink_packet_loss_fraction| is at medium, + // 1. `uplink_bandwidth_bps` is at medium level, + // 2. `uplink_packet_loss_fraction` is at medium, // 3. FEC is not decided ON. auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(), kDefaultEncoderFrameLengthsMs, 60); @@ -218,8 +218,8 @@ TEST(FrameLengthControllerTest, Maintain60MsOnMultipleConditions) { TEST(FrameLengthControllerTest, IncreaseTo60MsOnMultipleConditions) { // Increase to 60ms frame length if - // 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND - // 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold + // 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND + // 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold // AND // 3. FEC is not decided or OFF. 
auto controller = CreateController(CreateChangeCriteriaFor20msAnd60ms(), @@ -365,8 +365,8 @@ TEST(FrameLengthControllerTest, From60MsTo120MsOnVeryLowUplinkBandwidth) { TEST(FrameLengthControllerTest, From20MsTo120MsOnMultipleConditions) { // Increase to 120ms frame length if - // 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND - // 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold. + // 1. `uplink_bandwidth_bps` is known to be smaller than a threshold AND + // 2. `uplink_packet_loss_fraction` is known to be smaller than a threshold. auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(), kDefaultEncoderFrameLengthsMs, 20); // It takes two steps for frame length to go from 20ms to 120ms. diff --git a/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h b/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h index 94e8ed961e..bd16292f7e 100644 --- a/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h +++ b/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h @@ -32,7 +32,7 @@ struct AudioEncoderRuntimeConfig { absl::optional enable_dtx; // Some encoders can encode fewer channels than the actual input to make - // better use of the bandwidth. |num_channels| sets the number of channels + // better use of the bandwidth. `num_channels` sets the number of channels // to encode. absl::optional num_channels; diff --git a/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc b/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc index 968c118c8e..82ed46aa01 100644 --- a/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc +++ b/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc @@ -104,9 +104,9 @@ TEST(AudioDecoderFactoryTest, CreateL16) { rtc::scoped_refptr adf = CreateBuiltinAudioDecoderFactory(); ASSERT_TRUE(adf); - // L16 supports any clock rate, any number of channels. + // L16 supports any clock rate and any number of channels up to 24. 
const int clockrates[] = {8000, 16000, 32000, 48000}; - const int num_channels[] = {1, 2, 3, 4711}; + const int num_channels[] = {1, 2, 3, 24}; for (int clockrate : clockrates) { EXPECT_FALSE(adf->MakeAudioDecoder(SdpAudioFormat("l16", clockrate, 0), absl::nullopt)); @@ -117,6 +117,34 @@ TEST(AudioDecoderFactoryTest, CreateL16) { } } +// Tests that using more channels than the maximum does not work +TEST(AudioDecoderFactoryTest, MaxNrOfChannels) { + rtc::scoped_refptr adf = + CreateBuiltinAudioDecoderFactory(); + std::vector codecs = { +#ifdef WEBRTC_CODEC_OPUS + "opus", +#endif +#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX) + "isac", +#endif +#ifdef WEBRTC_CODEC_ILBC + "ilbc", +#endif + "pcmu", + "pcma", + "l16", + "G722", + "G711", + }; + + for (auto codec : codecs) { + EXPECT_FALSE(adf->MakeAudioDecoder( + SdpAudioFormat(codec, 32000, AudioDecoder::kMaxNumberOfChannels + 1), + absl::nullopt)); + } +} + TEST(AudioDecoderFactoryTest, CreateG722) { rtc::scoped_refptr adf = CreateBuiltinAudioDecoderFactory(); diff --git a/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc b/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc index 108b1c17bf..26ae1eda8a 100644 --- a/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc +++ b/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc @@ -144,4 +144,35 @@ TEST(BuiltinAudioEncoderFactoryTest, SupportsTheExpectedFormats) { ASSERT_THAT(supported_formats, ElementsAreArray(expected_formats)); } + +// Tests that using more channels than the maximum does not work. +TEST(BuiltinAudioEncoderFactoryTest, MaxNrOfChannels) { + rtc::scoped_refptr aef = + CreateBuiltinAudioEncoderFactory(); + std::vector codecs = { +#ifdef WEBRTC_CODEC_OPUS + "opus", +#endif +#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX) + "isac", +#endif +#ifdef WEBRTC_CODEC_ILBC + "ilbc", +#endif + "pcmu", + "pcma", + "l16", + "G722", + "G711", + }; + + for (auto codec : codecs) { + EXPECT_FALSE(aef->MakeAudioEncoder( + /*payload_type=*/111, + /*format=*/ + SdpAudioFormat(codec, 32000, AudioEncoder::kMaxNumberOfChannels + 1), + /*codec_pair_id=*/absl::nullopt)); + } +} + } // namespace webrtc diff --git a/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc index 547feddbf9..c688004363 100644 --- a/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc +++ b/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc @@ -14,7 +14,6 @@ #include #include "common_audio/vad/mock/mock_vad.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/numerics/safe_conversions.h" #include "test/gtest.h" #include "test/mock_audio_encoder.h" @@ -50,6 +49,9 @@ class AudioEncoderCngTest : public ::testing::Test { EXPECT_CALL(*mock_encoder_, NumChannels()).WillRepeatedly(Return(1)); } + AudioEncoderCngTest(const AudioEncoderCngTest&) = delete; + AudioEncoderCngTest& operator=(const AudioEncoderCngTest&) = delete; + void TearDown() override { EXPECT_CALL(*mock_vad_, Die()).Times(1); cng_.reset(); @@ -92,8 +94,8 @@ class AudioEncoderCngTest : public ::testing::Test { timestamp_ += static_cast(num_audio_samples_10ms_); } - // Expect |num_calls| calls to the encoder, all successful. The last call - // claims to have encoded |kMockReturnEncodedBytes| bytes, and all the + // Expect `num_calls` calls to the encoder, all successful. 
The last call + // claims to have encoded `kMockReturnEncodedBytes` bytes, and all the // preceding ones 0 bytes. void ExpectEncodeCalls(size_t num_calls) { InSequence s; @@ -108,7 +110,7 @@ class AudioEncoderCngTest : public ::testing::Test { } // Verifies that the cng_ object waits until it has collected - // |blocks_per_frame| blocks of audio, and then dispatches all of them to + // `blocks_per_frame` blocks of audio, and then dispatches all of them to // the underlying codec (speech or cng). void CheckBlockGrouping(size_t blocks_per_frame, bool active_speech) { EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket()) @@ -169,7 +171,7 @@ class AudioEncoderCngTest : public ::testing::Test { .WillOnce(Return(Vad::kPassive)); } - // With this call to Encode(), |mock_vad_| should be called according to the + // With this call to Encode(), `mock_vad_` should be called according to the // above expectations. Encode(); } @@ -201,15 +203,13 @@ class AudioEncoderCngTest : public ::testing::Test { std::unique_ptr cng_; std::unique_ptr mock_encoder_owner_; MockAudioEncoder* mock_encoder_; - MockVad* mock_vad_; // Ownership is transferred to |cng_|. + MockVad* mock_vad_; // Ownership is transferred to `cng_`. uint32_t timestamp_; int16_t audio_[kMaxNumSamples]; size_t num_audio_samples_10ms_; rtc::Buffer encoded_; AudioEncoder::EncodedInfo encoded_info_; int sample_rate_hz_; - - RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderCngTest); }; TEST_F(AudioEncoderCngTest, CreateAndDestroy) { @@ -294,7 +294,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) { for (size_t i = 0; i < 100; ++i) { Encode(); // Check if it was time to call the cng encoder. This is done once every - // |kBlocksPerFrame| calls. + // `kBlocksPerFrame` calls. if ((i + 1) % kBlocksPerFrame == 0) { // Now check if a SID interval has elapsed. if ((i % (sid_frame_interval_ms / 10)) < kBlocksPerFrame) { @@ -334,7 +334,7 @@ TEST_F(AudioEncoderCngTest, MixedActivePassive) { EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive)); EXPECT_TRUE(encoded_info_.speech); - // All of the frame is passive speech. Expect no calls to |mock_encoder_|. + // All of the frame is passive speech. Expect no calls to `mock_encoder_`. EXPECT_FALSE(CheckMixedActivePassive(Vad::kPassive, Vad::kPassive)); EXPECT_FALSE(encoded_info_.speech); } @@ -442,7 +442,7 @@ class AudioEncoderCngDeathTest : public AudioEncoderCngTest { } // Override AudioEncoderCngTest::TearDown, since that one expects a call to - // the destructor of |mock_vad_|. In this case, that object is already + // the destructor of `mock_vad_`. In this case, that object is already // deleted. void TearDown() override { cng_.reset(); } diff --git a/modules/audio_coding/codecs/cng/webrtc_cng.cc b/modules/audio_coding/codecs/cng/webrtc_cng.cc index 2acaf2bdee..48f1b8c296 100644 --- a/modules/audio_coding/codecs/cng/webrtc_cng.cc +++ b/modules/audio_coding/codecs/cng/webrtc_cng.cc @@ -193,10 +193,10 @@ bool ComfortNoiseDecoder::Generate(rtc::ArrayView out_data, WebRtcSpl_ScaleVector(excitation, excitation, dec_used_scale_factor_, num_samples, 13); - /* |lpPoly| - Coefficients in Q12. - * |excitation| - Speech samples. - * |nst->dec_filtstate| - State preservation. - * |out_data| - Filtered speech samples. */ + /* `lpPoly` - Coefficients in Q12. + * `excitation` - Speech samples. + * `nst->dec_filtstate` - State preservation. + * `out_data` - Filtered speech samples. 
*/ WebRtcSpl_FilterAR(lpPoly, WEBRTC_CNG_MAX_LPC_ORDER + 1, excitation, num_samples, dec_filtstate_, WEBRTC_CNG_MAX_LPC_ORDER, dec_filtstateLow_, WEBRTC_CNG_MAX_LPC_ORDER, @@ -395,7 +395,7 @@ size_t ComfortNoiseEncoder::Encode(rtc::ArrayView speech, } namespace { -/* Values in |k| are Q15, and |a| Q12. */ +/* Values in `k` are Q15, and `a` Q12. */ void WebRtcCng_K2a16(int16_t* k, int useOrder, int16_t* a) { int16_t any[WEBRTC_SPL_MAX_LPC_ORDER + 1]; int16_t* aptr; diff --git a/modules/audio_coding/codecs/cng/webrtc_cng.h b/modules/audio_coding/codecs/cng/webrtc_cng.h index 563f676510..7afd243f81 100644 --- a/modules/audio_coding/codecs/cng/webrtc_cng.h +++ b/modules/audio_coding/codecs/cng/webrtc_cng.h @@ -33,13 +33,13 @@ class ComfortNoiseDecoder { void Reset(); // Updates the CN state when a new SID packet arrives. - // |sid| is a view of the SID packet without the headers. + // `sid` is a view of the SID packet without the headers. void UpdateSid(rtc::ArrayView sid); // Generates comfort noise. - // |out_data| will be filled with samples - its size determines the number of - // samples generated. When |new_period| is true, CNG history will be reset - // before any audio is generated. Returns |false| if outData is too large - + // `out_data` will be filled with samples - its size determines the number of + // samples generated. When `new_period` is true, CNG history will be reset + // before any audio is generated. Returns `false` if outData is too large - // currently 640 bytes (equalling 10ms at 64kHz). // TODO(ossu): Specify better limits for the size of out_data. Either let it // be unbounded or limit to 10ms in the current sample rate. @@ -61,9 +61,9 @@ class ComfortNoiseDecoder { class ComfortNoiseEncoder { public: // Creates a comfort noise encoder. - // |fs| selects sample rate: 8000 for narrowband or 16000 for wideband. - // |interval| sets the interval at which to generate SID data (in ms). - // |quality| selects the number of refl. coeffs. Maximum allowed is 12. + // `fs` selects sample rate: 8000 for narrowband or 16000 for wideband. + // `interval` sets the interval at which to generate SID data (in ms). + // `quality` selects the number of refl. coeffs. Maximum allowed is 12. ComfortNoiseEncoder(int fs, int interval, int quality); ~ComfortNoiseEncoder() = default; @@ -74,8 +74,8 @@ class ComfortNoiseEncoder { // Parameters are set as during construction. void Reset(int fs, int interval, int quality); - // Analyzes background noise from |speech| and appends coefficients to - // |output|. Returns the number of coefficients generated. If |force_sid| is + // Analyzes background noise from `speech` and appends coefficients to + // `output`. Returns the number of coefficients generated. If `force_sid` is // true, a SID frame is forced and the internal sid interval counter is reset. // Will fail if the input size is too large (> 640 samples, see // ComfortNoiseDecoder::Generate). 
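Taken together, the ComfortNoiseEncoder and ComfortNoiseDecoder comments above describe a simple SID round trip. A rough usage sketch follows; the exact method signatures (including the `rtc::ArrayView` element types, which the extraction stripped) are assumptions reconstructed from this header, not quoted from it:

```cpp
// Hypothetical CNG round trip based on the header excerpts above.
#include "modules/audio_coding/codecs/cng/webrtc_cng.h"

#include "rtc_base/buffer.h"

void CngRoundTrip() {
  // 16 kHz wideband, a SID frame every 100 ms, 8 reflection coefficients
  // (the comment above caps `quality` at 12).
  webrtc::ComfortNoiseEncoder encoder(/*fs=*/16000, /*interval=*/100,
                                      /*quality=*/8);
  webrtc::ComfortNoiseDecoder decoder;

  int16_t speech[160] = {0};  // 10 ms of audio at 16 kHz.
  rtc::Buffer sid;
  // Analyze the background noise; forcing a SID frame makes `sid` non-empty.
  encoder.Encode(speech, /*force_sid=*/true, &sid);

  int16_t out_data[160];
  decoder.UpdateSid(sid);  // SID payload without headers.
  // `new_period` resets the CNG history before audio is generated; the size
  // of `out_data` determines how many samples are produced.
  decoder.Generate(out_data, /*new_period=*/true);
}
```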
diff --git a/modules/audio_coding/codecs/g711/audio_decoder_pcm.h b/modules/audio_coding/codecs/g711/audio_decoder_pcm.h index 618591876d..3fa42cba30 100644 --- a/modules/audio_coding/codecs/g711/audio_decoder_pcm.h +++ b/modules/audio_coding/codecs/g711/audio_decoder_pcm.h @@ -19,7 +19,6 @@ #include "api/audio_codecs/audio_decoder.h" #include "rtc_base/buffer.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -28,6 +27,10 @@ class AudioDecoderPcmU final : public AudioDecoder { explicit AudioDecoderPcmU(size_t num_channels) : num_channels_(num_channels) { RTC_DCHECK_GE(num_channels, 1); } + + AudioDecoderPcmU(const AudioDecoderPcmU&) = delete; + AudioDecoderPcmU& operator=(const AudioDecoderPcmU&) = delete; + void Reset() override; std::vector ParsePayload(rtc::Buffer&& payload, uint32_t timestamp) override; @@ -44,7 +47,6 @@ class AudioDecoderPcmU final : public AudioDecoder { private: const size_t num_channels_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcmU); }; class AudioDecoderPcmA final : public AudioDecoder { @@ -52,6 +54,10 @@ class AudioDecoderPcmA final : public AudioDecoder { explicit AudioDecoderPcmA(size_t num_channels) : num_channels_(num_channels) { RTC_DCHECK_GE(num_channels, 1); } + + AudioDecoderPcmA(const AudioDecoderPcmA&) = delete; + AudioDecoderPcmA& operator=(const AudioDecoderPcmA&) = delete; + void Reset() override; std::vector ParsePayload(rtc::Buffer&& payload, uint32_t timestamp) override; @@ -68,7 +74,6 @@ class AudioDecoderPcmA final : public AudioDecoder { private: const size_t num_channels_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcmA); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/g711/audio_encoder_pcm.h b/modules/audio_coding/codecs/g711/audio_encoder_pcm.h index c4413f50a4..d50be4b457 100644 --- a/modules/audio_coding/codecs/g711/audio_encoder_pcm.h +++ b/modules/audio_coding/codecs/g711/audio_encoder_pcm.h @@ -17,7 +17,6 @@ #include "absl/types/optional.h" #include "api/audio_codecs/audio_encoder.h" #include "api/units/time_delta.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -83,6 +82,9 @@ class AudioEncoderPcmA final : public AudioEncoderPcm { explicit AudioEncoderPcmA(const Config& config) : AudioEncoderPcm(config, kSampleRateHz) {} + AudioEncoderPcmA(const AudioEncoderPcmA&) = delete; + AudioEncoderPcmA& operator=(const AudioEncoderPcmA&) = delete; + protected: size_t EncodeCall(const int16_t* audio, size_t input_len, @@ -94,7 +96,6 @@ class AudioEncoderPcmA final : public AudioEncoderPcm { private: static const int kSampleRateHz = 8000; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcmA); }; class AudioEncoderPcmU final : public AudioEncoderPcm { @@ -106,6 +107,9 @@ class AudioEncoderPcmU final : public AudioEncoderPcm { explicit AudioEncoderPcmU(const Config& config) : AudioEncoderPcm(config, kSampleRateHz) {} + AudioEncoderPcmU(const AudioEncoderPcmU&) = delete; + AudioEncoderPcmU& operator=(const AudioEncoderPcmU&) = delete; + protected: size_t EncodeCall(const int16_t* audio, size_t input_len, @@ -117,7 +121,6 @@ class AudioEncoderPcmU final : public AudioEncoderPcm { private: static const int kSampleRateHz = 8000; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcmU); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/g722/audio_decoder_g722.h b/modules/audio_coding/codecs/g722/audio_decoder_g722.h index 6911e0bad5..39e9e630be 100644 --- a/modules/audio_coding/codecs/g722/audio_decoder_g722.h +++ 
b/modules/audio_coding/codecs/g722/audio_decoder_g722.h @@ -12,7 +12,6 @@ #define MODULES_AUDIO_CODING_CODECS_G722_AUDIO_DECODER_G722_H_ #include "api/audio_codecs/audio_decoder.h" -#include "rtc_base/constructor_magic.h" typedef struct WebRtcG722DecInst G722DecInst; @@ -22,6 +21,10 @@ class AudioDecoderG722Impl final : public AudioDecoder { public: AudioDecoderG722Impl(); ~AudioDecoderG722Impl() override; + + AudioDecoderG722Impl(const AudioDecoderG722Impl&) = delete; + AudioDecoderG722Impl& operator=(const AudioDecoderG722Impl&) = delete; + bool HasDecodePlc() const override; void Reset() override; std::vector ParsePayload(rtc::Buffer&& payload, @@ -39,13 +42,17 @@ class AudioDecoderG722Impl final : public AudioDecoder { private: G722DecInst* dec_state_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722Impl); }; class AudioDecoderG722StereoImpl final : public AudioDecoder { public: AudioDecoderG722StereoImpl(); ~AudioDecoderG722StereoImpl() override; + + AudioDecoderG722StereoImpl(const AudioDecoderG722StereoImpl&) = delete; + AudioDecoderG722StereoImpl& operator=(const AudioDecoderG722StereoImpl&) = + delete; + void Reset() override; std::vector ParsePayload(rtc::Buffer&& payload, uint32_t timestamp) override; @@ -60,18 +67,17 @@ class AudioDecoderG722StereoImpl final : public AudioDecoder { SpeechType* speech_type) override; private: - // Splits the stereo-interleaved payload in |encoded| into separate payloads + // Splits the stereo-interleaved payload in `encoded` into separate payloads // for left and right channels. The separated payloads are written to - // |encoded_deinterleaved|, which must hold at least |encoded_len| samples. + // `encoded_deinterleaved`, which must hold at least `encoded_len` samples. // The left channel starts at offset 0, while the right channel starts at - // offset encoded_len / 2 into |encoded_deinterleaved|. + // offset encoded_len / 2 into `encoded_deinterleaved`. 
void SplitStereoPacket(const uint8_t* encoded, size_t encoded_len, uint8_t* encoded_deinterleaved); G722DecInst* dec_state_left_; G722DecInst* dec_state_right_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722StereoImpl); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/g722/audio_encoder_g722.h b/modules/audio_coding/codecs/g722/audio_encoder_g722.h index c836503f2b..a932aa8b7d 100644 --- a/modules/audio_coding/codecs/g722/audio_encoder_g722.h +++ b/modules/audio_coding/codecs/g722/audio_encoder_g722.h @@ -20,7 +20,6 @@ #include "api/units/time_delta.h" #include "modules/audio_coding/codecs/g722/g722_interface.h" #include "rtc_base/buffer.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -29,6 +28,9 @@ class AudioEncoderG722Impl final : public AudioEncoder { AudioEncoderG722Impl(const AudioEncoderG722Config& config, int payload_type); ~AudioEncoderG722Impl() override; + AudioEncoderG722Impl(const AudioEncoderG722Impl&) = delete; + AudioEncoderG722Impl& operator=(const AudioEncoderG722Impl&) = delete; + int SampleRateHz() const override; size_t NumChannels() const override; int RtpTimestampRateHz() const override; @@ -63,7 +65,6 @@ class AudioEncoderG722Impl final : public AudioEncoder { uint32_t first_timestamp_in_buffer_; const std::unique_ptr encoders_; rtc::Buffer interleave_buffer_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderG722Impl); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h b/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h index c2d62ed2d1..46ba755148 100644 --- a/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h +++ b/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h @@ -18,7 +18,6 @@ #include "api/audio_codecs/audio_decoder.h" #include "rtc_base/buffer.h" -#include "rtc_base/constructor_magic.h" typedef struct iLBC_decinst_t_ IlbcDecoderInstance; @@ -28,6 +27,10 @@ class AudioDecoderIlbcImpl final : public AudioDecoder { public: AudioDecoderIlbcImpl(); ~AudioDecoderIlbcImpl() override; + + AudioDecoderIlbcImpl(const AudioDecoderIlbcImpl&) = delete; + AudioDecoderIlbcImpl& operator=(const AudioDecoderIlbcImpl&) = delete; + bool HasDecodePlc() const override; size_t DecodePlc(size_t num_frames, int16_t* decoded) override; void Reset() override; @@ -45,7 +48,6 @@ class AudioDecoderIlbcImpl final : public AudioDecoder { private: IlbcDecoderInstance* dec_state_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderIlbcImpl); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h b/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h index 05a900e3c4..c8dfa2ca6d 100644 --- a/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h +++ b/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h @@ -21,7 +21,6 @@ #include "api/audio_codecs/ilbc/audio_encoder_ilbc_config.h" #include "api/units/time_delta.h" #include "modules/audio_coding/codecs/ilbc/ilbc.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -30,6 +29,9 @@ class AudioEncoderIlbcImpl final : public AudioEncoder { AudioEncoderIlbcImpl(const AudioEncoderIlbcConfig& config, int payload_type); ~AudioEncoderIlbcImpl() override; + AudioEncoderIlbcImpl(const AudioEncoderIlbcImpl&) = delete; + AudioEncoderIlbcImpl& operator=(const AudioEncoderIlbcImpl&) = delete; + int SampleRateHz() const override; size_t NumChannels() const override; size_t Num10MsFramesInNextPacket() const override; @@ -53,7 +55,6 @@ class AudioEncoderIlbcImpl final : public AudioEncoder { uint32_t first_timestamp_in_buffer_; int16_t 
input_buffer_[kMaxSamplesPerPacket]; IlbcEncoderInstance* encoder_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderIlbcImpl); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/ilbc/create_augmented_vec.c b/modules/audio_coding/codecs/ilbc/create_augmented_vec.c index 8033c959b5..7e21faee6c 100644 --- a/modules/audio_coding/codecs/ilbc/create_augmented_vec.c +++ b/modules/audio_coding/codecs/ilbc/create_augmented_vec.c @@ -39,7 +39,7 @@ void WebRtcIlbcfix_CreateAugmentedVec( const int16_t *ppo, *ppi; int16_t cbVecTmp[4]; /* Interpolation starts 4 elements before cbVec+index, but must not start - outside |cbVec|; clamping interp_len to stay within |cbVec|. + outside `cbVec`; clamping interp_len to stay within `cbVec`. */ size_t interp_len = WEBRTC_SPL_MIN(index, 4); @@ -69,12 +69,12 @@ void WebRtcIlbcfix_CreateAugmentedVec( /* copy the second noninterpolated part */ ppo = buffer - index; - /* |tempbuff2| is declared in WebRtcIlbcfix_GetCbVec and is SUBL+5 elements - long. |buffer| points one element past the end of that vector, i.e., at + /* `tempbuff2` is declared in WebRtcIlbcfix_GetCbVec and is SUBL+5 elements + long. `buffer` points one element past the end of that vector, i.e., at tempbuff2+SUBL+5. Since ppo=buffer-index, we cannot read any more than - |index| elements from |ppo|. + `index` elements from `ppo`. - |cbVec| is declared to be SUBL elements long in WebRtcIlbcfix_CbConstruct. + `cbVec` is declared to be SUBL elements long in WebRtcIlbcfix_CbConstruct. Therefore, we can only write SUBL-index elements to cbVec+index. These two conditions limit the number of elements to copy. diff --git a/modules/audio_coding/codecs/ilbc/get_cd_vec.c b/modules/audio_coding/codecs/ilbc/get_cd_vec.c index 145cb964d7..e9cd2008e0 100644 --- a/modules/audio_coding/codecs/ilbc/get_cd_vec.c +++ b/modules/audio_coding/codecs/ilbc/get_cd_vec.c @@ -99,7 +99,7 @@ bool WebRtcIlbcfix_GetCbVec( // We're going to fill in cbveclen + 5 elements of tempbuff2 in // WebRtcSpl_FilterMAFastQ12, less than the SUBL + 5 elements we'll be // using in WebRtcIlbcfix_CreateAugmentedVec. This error is caused by - // bad values in |index| (which come from the encoded stream). Tell the + // bad values in `index` (which come from the encoded stream). Tell the // caller that things went south, and that the decoder state is now // corrupt (because it's half-way through an update that we can't // complete). 
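The bounds argument in the create_augmented_vec.c comments above (interpolation clamped to `index`, and the copy bounded by both the readable and the writable range) is easier to check in isolation. A standalone sketch with illustrative names:

```cpp
// Illustrative restatement of the clamping logic documented above; kSubL and
// the function itself are stand-ins, not the real iLBC code.
#include <algorithm>
#include <cstddef>

constexpr size_t kSubL = 40;  // Subframe length, as in iLBC.

void ClampingSketch(size_t index) {  // Assumes 0 < index <= kSubL.
  // Interpolation starts 4 elements before cbVec + index but must not start
  // before cbVec itself, so the interpolation length is clamped to `index`.
  const size_t interp_len = std::min<size_t>(index, 4);

  // ppo = buffer - index, and `buffer` points one past the end of tempbuff2,
  // so at most `index` elements can be read from ppo. Writes land at
  // cbVec + index in a kSubL-element vector, so at most kSubL - index
  // elements fit. The copy length is the smaller of the two bounds.
  const size_t copy_len = std::min(index, kSubL - index);

  (void)interp_len;
  (void)copy_len;
}
```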
diff --git a/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h b/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h index 23a302018f..aae708f295 100644 --- a/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h +++ b/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h @@ -16,7 +16,6 @@ #include "absl/types/optional.h" #include "api/audio_codecs/audio_decoder.h" #include "api/scoped_refptr.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -30,6 +29,9 @@ class AudioDecoderIsacT final : public AudioDecoder { explicit AudioDecoderIsacT(const Config& config); virtual ~AudioDecoderIsacT() override; + AudioDecoderIsacT(const AudioDecoderIsacT&) = delete; + AudioDecoderIsacT& operator=(const AudioDecoderIsacT&) = delete; + bool HasDecodePlc() const override; size_t DecodePlc(size_t num_frames, int16_t* decoded) override; void Reset() override; @@ -45,8 +47,6 @@ class AudioDecoderIsacT final : public AudioDecoder { private: typename T::instance_type* isac_state_; int sample_rate_hz_; - - RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderIsacT); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h b/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h index 8bde0e34ad..c382ea076e 100644 --- a/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h +++ b/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h @@ -18,7 +18,6 @@ #include "api/audio_codecs/audio_encoder.h" #include "api/scoped_refptr.h" #include "api/units/time_delta.h" -#include "rtc_base/constructor_magic.h" #include "system_wrappers/include/field_trial.h" namespace webrtc { @@ -44,6 +43,9 @@ class AudioEncoderIsacT final : public AudioEncoder { explicit AudioEncoderIsacT(const Config& config); ~AudioEncoderIsacT() override; + AudioEncoderIsacT(const AudioEncoderIsacT&) = delete; + AudioEncoderIsacT& operator=(const AudioEncoderIsacT&) = delete; + int SampleRateHz() const override; size_t NumChannels() const override; size_t Num10MsFramesInNextPacket() const override; @@ -99,8 +101,6 @@ class AudioEncoderIsacT final : public AudioEncoder { // Start out with a reasonable default that we can use until we receive a real // value. DataSize overhead_per_packet_ = DataSize::Bytes(28); - - RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderIsacT); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/isac/fix/source/arith_routins.h b/modules/audio_coding/codecs/isac/fix/source/arith_routins.h index cc4ed555cf..d112bfe7f2 100644 --- a/modules/audio_coding/codecs/isac/fix/source/arith_routins.h +++ b/modules/audio_coding/codecs/isac/fix/source/arith_routins.h @@ -38,7 +38,7 @@ int WebRtcIsacfix_EncLogisticMulti2(Bitstr_enc* streamData, int16_t* dataQ7, const uint16_t* env, - const int16_t lenData); + int16_t lenData); /**************************************************************************** * WebRtcIsacfix_EncTerminate(...) @@ -73,7 +73,7 @@ int16_t WebRtcIsacfix_EncTerminate(Bitstr_enc* streamData); int WebRtcIsacfix_DecLogisticMulti2(int16_t* data, Bitstr_dec* streamData, const int32_t* env, - const int16_t lenData); + int16_t lenData); /**************************************************************************** * WebRtcIsacfix_EncHistMulti(...) 
@@ -92,7 +92,7 @@ int WebRtcIsacfix_DecLogisticMulti2(int16_t* data, int WebRtcIsacfix_EncHistMulti(Bitstr_enc* streamData, const int16_t* data, const uint16_t* const* cdf, - const int16_t lenData); + int16_t lenData); /**************************************************************************** * WebRtcIsacfix_DecHistBisectMulti(...) @@ -118,7 +118,7 @@ int16_t WebRtcIsacfix_DecHistBisectMulti(int16_t* data, Bitstr_dec* streamData, const uint16_t* const* cdf, const uint16_t* cdfSize, - const int16_t lenData); + int16_t lenData); /**************************************************************************** * WebRtcIsacfix_DecHistOneStepMulti(...) @@ -144,6 +144,6 @@ int16_t WebRtcIsacfix_DecHistOneStepMulti(int16_t* data, Bitstr_dec* streamData, const uint16_t* const* cdf, const uint16_t* initIndex, - const int16_t lenData); + int16_t lenData); #endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ARITH_ROUTINS_H_ */ diff --git a/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h b/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h index ebb74d6c49..f106746f14 100644 --- a/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h +++ b/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h @@ -56,17 +56,17 @@ int32_t WebRtcIsacfix_InitBandwidthEstimator(BwEstimatorstr* bwest_str); */ int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr* bwest_str, - const uint16_t rtp_number, - const int16_t frameSize, - const uint32_t send_ts, - const uint32_t arr_ts, - const size_t pksize, - const uint16_t Index); + uint16_t rtp_number, + int16_t frameSize, + uint32_t send_ts, + uint32_t arr_ts, + size_t pksize, + uint16_t Index); /* Update receiving estimates. Used when we only receive BWE index, no iSAC data * packet. */ int16_t WebRtcIsacfix_UpdateUplinkBwRec(BwEstimatorstr* bwest_str, - const int16_t Index); + int16_t Index); /**************************************************************************** * WebRtcIsacfix_GetDownlinkBwIndexImpl(...) 
@@ -100,19 +100,19 @@ int16_t WebRtcIsacfix_GetUplinkMaxDelay(const BwEstimatorstr* bwest_str); */ uint16_t WebRtcIsacfix_GetMinBytes( RateModel* State, - int16_t StreamSize, /* bytes in bitstream */ - const int16_t FrameLen, /* ms per frame */ - const int16_t BottleNeck, /* bottle neck rate; excl headers (bps) */ - const int16_t DelayBuildUp); /* max delay from bottle neck buffering (ms) */ + int16_t StreamSize, /* bytes in bitstream */ + int16_t FrameLen, /* ms per frame */ + int16_t BottleNeck, /* bottle neck rate; excl headers (bps) */ + int16_t DelayBuildUp); /* max delay from bottle neck buffering (ms) */ /* * update long-term average bitrate and amount of data in buffer */ void WebRtcIsacfix_UpdateRateModel( RateModel* State, - int16_t StreamSize, /* bytes in bitstream */ - const int16_t FrameSamples, /* samples per frame */ - const int16_t BottleNeck); /* bottle neck rate; excl headers (bps) */ + int16_t StreamSize, /* bytes in bitstream */ + int16_t FrameSamples, /* samples per frame */ + int16_t BottleNeck); /* bottle neck rate; excl headers (bps) */ void WebRtcIsacfix_InitRateModel(RateModel* State); diff --git a/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c b/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c index cb154456d4..842e77f47e 100644 --- a/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c +++ b/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c @@ -831,15 +831,15 @@ It first shifts input data of one matrix, determines the right indexes for the two matrixes, multiply them, and write the results into an output buffer. Note that two factors (or, multipliers) determine the initialization values of -the variable |matrix1_index| in the code. The relationship is -|matrix1_index| = |matrix1_index_factor1| * |matrix1_index_factor2|, where -|matrix1_index_factor1| is given by the argument while |matrix1_index_factor2| -is determined by the value of argument |matrix1_index_init_case|; -|matrix1_index_factor2| is the value of the outmost loop counter j (when -|matrix1_index_init_case| is 0), or the value of the middle loop counter k (when -|matrix1_index_init_case| is non-zero). - -|matrix0_index| is determined the same way. +the variable `matrix1_index` in the code. The relationship is +`matrix1_index` = `matrix1_index_factor1` * `matrix1_index_factor2`, where +`matrix1_index_factor1` is given by the argument while `matrix1_index_factor2` +is determined by the value of argument `matrix1_index_init_case`; +`matrix1_index_factor2` is the value of the outmost loop counter j (when +`matrix1_index_init_case` is 0), or the value of the middle loop counter k (when +`matrix1_index_init_case` is non-zero). + +`matrix0_index` is determined the same way. Arguments: matrix0[]: matrix0 data in Q15 domain. 
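The `matrix1_index` initialization rule documented in the entropy_coding.c comment above can be restated as a loop skeleton. This sketch assumes the three nested loops the comment describes; the counter names j and k come from the comment, everything else is illustrative:

```cpp
// Loop skeleton for the `matrix1_index` initialization rule described above.
// Only the index bookkeeping is shown; the multiply-accumulate body and the
// step handling of the real WebRtcIsacfix_MatrixProduct1 are omitted.
void MatrixIndexSketch(int matrix1_index_factor1,
                       int matrix1_index_init_case,
                       int outer_loop_count,
                       int mid_loop_count) {
  for (int j = 0; j < outer_loop_count; ++j) {
    for (int k = 0; k < mid_loop_count; ++k) {
      // matrix1_index = matrix1_index_factor1 * matrix1_index_factor2, where
      // matrix1_index_factor2 is the outermost counter j when
      // matrix1_index_init_case is 0, and the middle counter k otherwise.
      const int matrix1_index_factor2 =
          (matrix1_index_init_case == 0) ? j : k;
      int matrix1_index = matrix1_index_factor1 * matrix1_index_factor2;
      // The inner loop would then advance matrix1_index by matrix1_index_step
      // while multiplying matrix0 and matrix1 elements into the output.
      (void)matrix1_index;
    }
  }
}
```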
diff --git a/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h b/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h index b4251cee1e..ae11394f7c 100644 --- a/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h +++ b/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h @@ -101,19 +101,19 @@ void WebRtcIsacfix_TranscodeLpcCoef(int32_t* tmpcoeffs_gQ6, int16_t* index_gQQ); typedef void (*MatrixProduct1)(const int16_t matrix0[], const int32_t matrix1[], int32_t matrix_product[], - const int matrix1_index_factor1, - const int matrix0_index_factor1, - const int matrix1_index_init_case, - const int matrix1_index_step, - const int matrix0_index_step, - const int inner_loop_count, - const int mid_loop_count, - const int shift); + int matrix1_index_factor1, + int matrix0_index_factor1, + int matrix1_index_init_case, + int matrix1_index_step, + int matrix0_index_step, + int inner_loop_count, + int mid_loop_count, + int shift); typedef void (*MatrixProduct2)(const int16_t matrix0[], const int32_t matrix1[], int32_t matrix_product[], - const int matrix0_index_factor, - const int matrix0_index_step); + int matrix0_index_factor, + int matrix0_index_step); extern MatrixProduct1 WebRtcIsacfix_MatrixProduct1; extern MatrixProduct2 WebRtcIsacfix_MatrixProduct2; @@ -121,57 +121,57 @@ extern MatrixProduct2 WebRtcIsacfix_MatrixProduct2; void WebRtcIsacfix_MatrixProduct1C(const int16_t matrix0[], const int32_t matrix1[], int32_t matrix_product[], - const int matrix1_index_factor1, - const int matrix0_index_factor1, - const int matrix1_index_init_case, - const int matrix1_index_step, - const int matrix0_index_step, - const int inner_loop_count, - const int mid_loop_count, - const int shift); + int matrix1_index_factor1, + int matrix0_index_factor1, + int matrix1_index_init_case, + int matrix1_index_step, + int matrix0_index_step, + int inner_loop_count, + int mid_loop_count, + int shift); void WebRtcIsacfix_MatrixProduct2C(const int16_t matrix0[], const int32_t matrix1[], int32_t matrix_product[], - const int matrix0_index_factor, - const int matrix0_index_step); + int matrix0_index_factor, + int matrix0_index_step); #if defined(WEBRTC_HAS_NEON) void WebRtcIsacfix_MatrixProduct1Neon(const int16_t matrix0[], const int32_t matrix1[], int32_t matrix_product[], - const int matrix1_index_factor1, - const int matrix0_index_factor1, - const int matrix1_index_init_case, - const int matrix1_index_step, - const int matrix0_index_step, - const int inner_loop_count, - const int mid_loop_count, - const int shift); + int matrix1_index_factor1, + int matrix0_index_factor1, + int matrix1_index_init_case, + int matrix1_index_step, + int matrix0_index_step, + int inner_loop_count, + int mid_loop_count, + int shift); void WebRtcIsacfix_MatrixProduct2Neon(const int16_t matrix0[], const int32_t matrix1[], int32_t matrix_product[], - const int matrix0_index_factor, - const int matrix0_index_step); + int matrix0_index_factor, + int matrix0_index_step); #endif #if defined(MIPS32_LE) void WebRtcIsacfix_MatrixProduct1MIPS(const int16_t matrix0[], const int32_t matrix1[], int32_t matrix_product[], - const int matrix1_index_factor1, - const int matrix0_index_factor1, - const int matrix1_index_init_case, - const int matrix1_index_step, - const int matrix0_index_step, - const int inner_loop_count, - const int mid_loop_count, - const int shift); + int matrix1_index_factor1, + int matrix0_index_factor1, + int matrix1_index_init_case, + int matrix1_index_step, + int matrix0_index_step, + int inner_loop_count, 
+ int mid_loop_count, + int shift); void WebRtcIsacfix_MatrixProduct2MIPS(const int16_t matrix0[], const int32_t matrix1[], int32_t matrix_product[], - const int matrix0_index_factor, - const int matrix0_index_step); + int matrix0_index_factor, + int matrix0_index_step); #endif #endif // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ENTROPY_CODING_H_ diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h b/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h index 6b99914b64..f741e6f677 100644 --- a/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h +++ b/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h @@ -46,7 +46,7 @@ typedef void (*AllpassFilter2FixDec16)( int16_t* data_ch2, // Input and output in channel 2, in Q0 const int16_t* factor_ch1, // Scaling factor for channel 1, in Q15 const int16_t* factor_ch2, // Scaling factor for channel 2, in Q15 - const int length, // Length of the data buffers + int length, // Length of the data buffers int32_t* filter_state_ch1, // Filter state for channel 1, in Q16 int32_t* filter_state_ch2); // Filter state for channel 2, in Q16 extern AllpassFilter2FixDec16 WebRtcIsacfix_AllpassFilter2FixDec16; @@ -55,7 +55,7 @@ void WebRtcIsacfix_AllpassFilter2FixDec16C(int16_t* data_ch1, int16_t* data_ch2, const int16_t* factor_ch1, const int16_t* factor_ch2, - const int length, + int length, int32_t* filter_state_ch1, int32_t* filter_state_ch2); @@ -64,7 +64,7 @@ void WebRtcIsacfix_AllpassFilter2FixDec16Neon(int16_t* data_ch1, int16_t* data_ch2, const int16_t* factor_ch1, const int16_t* factor_ch2, - const int length, + int length, int32_t* filter_state_ch1, int32_t* filter_state_ch2); #endif @@ -74,7 +74,7 @@ void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(int16_t* data_ch1, int16_t* data_ch2, const int16_t* factor_ch1, const int16_t* factor_ch2, - const int length, + int length, int32_t* filter_state_ch1, int32_t* filter_state_ch2); #endif diff --git a/modules/audio_coding/codecs/isac/fix/source/filters.c b/modules/audio_coding/codecs/isac/fix/source/filters.c index 85860f72f4..838ba4b3e8 100644 --- a/modules/audio_coding/codecs/isac/fix/source/filters.c +++ b/modules/audio_coding/codecs/isac/fix/source/filters.c @@ -75,7 +75,7 @@ static void AllpassFilterForDec32(int16_t *InOut16, //Q0 a = WEBRTC_SPL_MUL_16_32_RSFT16(InOut16[n], APSectionFactors[j]); //Q0*Q31=Q31 shifted 16 gives Q15 a <<= 1; // Q15 -> Q16 b = WebRtcSpl_AddSatW32(a, FilterState[j]); //Q16+Q16=Q16 - // |a| in Q15 (Q0*Q31=Q31 shifted 16 gives Q15). + // `a` in Q15 (Q0*Q31=Q31 shifted 16 gives Q15). a = WEBRTC_SPL_MUL_16_32_RSFT16(b >> 16, -APSectionFactors[j]); // FilterState[j]: Q15<<1 + Q0<<16 = Q16 + Q16 = Q16 FilterState[j] = WebRtcSpl_AddSatW32(a << 1, (uint32_t)InOut16[n] << 16); diff --git a/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/modules/audio_coding/codecs/isac/fix/source/isacfix.c index 067d8f358f..9a66591de1 100644 --- a/modules/audio_coding/codecs/isac/fix/source/isacfix.c +++ b/modules/audio_coding/codecs/isac/fix/source/isacfix.c @@ -34,7 +34,7 @@ Time2Spec WebRtcIsacfix_Time2Spec; MatrixProduct1 WebRtcIsacfix_MatrixProduct1; MatrixProduct2 WebRtcIsacfix_MatrixProduct2; -/* This method assumes that |stream_size_bytes| is in valid range, +/* This method assumes that `stream_size_bytes` is in valid range, * i.e. 
>= 0 && <= STREAM_MAXW16_60MS */ static void InitializeDecoderBitstream(size_t stream_size_bytes, @@ -294,8 +294,8 @@ int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct *ISAC_main_inst, return statusInit; } -/* Read the given number of bytes of big-endian 16-bit integers from |src| and - write them to |dest| in host endian. If |nbytes| is odd, the number of +/* Read the given number of bytes of big-endian 16-bit integers from `src` and + write them to `dest` in host endian. If `nbytes` is odd, the number of output elements is rounded up, and the least significant byte of the last element is set to 0. */ static void read_be16(const uint8_t* src, size_t nbytes, uint16_t* dest) { @@ -306,8 +306,8 @@ static void read_be16(const uint8_t* src, size_t nbytes, uint16_t* dest) { dest[nbytes / 2] = src[nbytes - 1] << 8; } -/* Read the given number of bytes of host-endian 16-bit integers from |src| and - write them to |dest| in big endian. If |nbytes| is odd, the number of source +/* Read the given number of bytes of host-endian 16-bit integers from `src` and + write them to `dest` in big endian. If `nbytes` is odd, the number of source elements is rounded up (but only the most significant byte of the last element is used), and the number of output bytes written will be nbytes + 1. */ diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c index b538085d14..f151cd1c88 100644 --- a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c +++ b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c @@ -663,7 +663,7 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0, /* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */ /* Calculate corrlo2[0] = tmpQQlo * corrlo[0] - 2.0*tmpQQlo * corrlo[1];*/ - // |corrlo2QQ| in Q(QdomLO-5). + // `corrlo2QQ` in Q(QdomLO-5). corrlo2QQ[0] = (WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQlo, corrloQQ[0]) >> 1) - (WEBRTC_SPL_MUL_16_32_RSFT16(aaQ14, corrloQQ[1]) >> 2); @@ -721,12 +721,12 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0, tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha, tmp); } else if ((sh-shMem)<7){ tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufLoQQ as much as possible - // Shift |alpha| the number of times required to get |tmp| in QdomLO. + // Shift `alpha` the number of times required to get `tmp` in QdomLO. tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp); } else { tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible - // Shift |alpha| as much as possible without overflow the number of - // times required to get |tmp| in QdomLO. + // Shift `alpha` as much as possible without overflow the number of + // times required to get `tmp` in QdomLO. tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp); tmpCorr = corrloQQ[n] >> (sh - shMem - 6); tmp = tmp + tmpCorr; @@ -774,7 +774,7 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0, maskdata->CorrBufHiQdom[n] = QdomHI; } else if ((sh-shMem)<7) { tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible - // Shift |alpha| the number of times required to get |tmp| in QdomHI. + // Shift `alpha` the number of times required to get `tmp` in QdomHI. 
tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp); tmpCorr = corrhiQQ[n]; tmp = tmp + tmpCorr; @@ -782,8 +782,8 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0, maskdata->CorrBufHiQdom[n] = QdomHI; } else { tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible - // Shift |alpha| as much as possible without overflow the number of - // times required to get |tmp| in QdomHI. + // Shift `alpha` as much as possible without overflow the number of + // times required to get `tmp` in QdomHI. tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp); tmpCorr = corrhiQQ[n] >> (sh - shMem - 6); tmp = tmp + tmpCorr; @@ -919,7 +919,7 @@ void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0, tmp32a = varscaleQ14 >> 1; // H_T_HQ19=65536 (16-17=-1) - ssh = sh_hi >> 1; // |sqrt_nrg| is in Qssh. + ssh = sh_hi >> 1; // `sqrt_nrg` is in Qssh. sh = ssh - 14; tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh tmp32c = sqrt_nrg + tmp32b; // Qssh (denominator) diff --git a/modules/audio_coding/codecs/isac/fix/test/kenny.cc b/modules/audio_coding/codecs/isac/fix/test/kenny.cc deleted file mode 100644 index 4b431cfdf9..0000000000 --- a/modules/audio_coding/codecs/isac/fix/test/kenny.cc +++ /dev/null @@ -1,883 +0,0 @@ -/* - * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include -#include -#include -#include - -#include "modules/audio_coding/codecs/isac/fix/include/isacfix.h" -#include "test/gtest.h" -#include "test/testsupport/perf_test.h" - -// TODO(kma): Clean up the code and change benchmarking the whole codec to -// separate encoder and decoder. - -/* Defines */ -#define SEED_FILE \ - "randseed.txt" /* Used when running decoder on garbage data \ - */ -#define MAX_FRAMESAMPLES \ - 960 /* max number of samples per frame (= 60 ms frame) */ -#define FRAMESAMPLES_10ms 160 /* number of samples per 10ms frame */ -#define FS 16000 /* sampling frequency (Hz) */ - -/* Function for reading audio data from PCM file */ -int readframe(int16_t* data, FILE* inp, int length) { - short k, rlen, status = 0; - - rlen = fread(data, sizeof(int16_t), length, inp); - if (rlen < length) { - for (k = rlen; k < length; k++) - data[k] = 0; - status = 1; - } - - return status; -} - -// Globals needed because gtest does not provide access to argv. -// This should be reworked to use flags. 
-static int global_argc; -static char** global_argv; - -/* Struct for bottleneck model */ -typedef struct { - uint32_t send_time; /* samples */ - uint32_t arrival_time; /* samples */ - uint32_t sample_count; /* samples */ - uint16_t rtp_number; -} BottleNeckModel; - -void get_arrival_time(int current_framesamples, /* samples */ - size_t packet_size, /* bytes */ - int bottleneck, /* excluding headers; bits/s */ - BottleNeckModel* BN_data) { - const int HeaderSize = 35; - int HeaderRate; - - HeaderRate = HeaderSize * 8 * FS / current_framesamples; /* bits/s */ - - /* everything in samples */ - BN_data->sample_count = BN_data->sample_count + current_framesamples; - - BN_data->arrival_time += static_cast( - ((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate)); - BN_data->send_time += current_framesamples; - - if (BN_data->arrival_time < BN_data->sample_count) - BN_data->arrival_time = BN_data->sample_count; - - BN_data->rtp_number++; -} - -void get_arrival_time2(int current_framesamples, - int current_delay, - BottleNeckModel* BN_data) { - if (current_delay == -1) - // dropped packet - { - BN_data->arrival_time += current_framesamples; - } else if (current_delay != -2) { - // - BN_data->arrival_time += - (current_framesamples + ((FS / 1000) * current_delay)); - } - // else - // current packet has same timestamp as previous packet - - BN_data->rtp_number++; -} - -TEST(IsacFixTest, Kenny) { - int argc = global_argc; - char** argv = global_argv; - - char inname[100], outname[100], outbitsname[100], bottleneck_file[100]; - FILE *inp, *outp, *f_bn, *outbits; - int endfile; - - const char* perf_result_file = NULL; - - int i; - int errtype, h = 0, k, packetLossPercent = 0; - int16_t CodingMode; - int16_t bottleneck; - int framesize = 30; /* ms */ - int cur_framesmpls, err = 0, lostPackets = 0; - - /* Runtime statistics */ - double starttime, runtime, length_file; - - int stream_len_int = 0; - size_t stream_len = 0; - int16_t framecnt; - int declen = 0; - int16_t shortdata[FRAMESAMPLES_10ms]; - int16_t decoded[MAX_FRAMESAMPLES]; - uint16_t streamdata[500]; - int16_t speechType[1]; - size_t prevFrameSize = 1; - int16_t rateBPS = 0; - int16_t fixedFL = 0; - int16_t payloadSize = 0; - int32_t payloadRate = 0; - int setControlBWE = 0; - int readLoss; - FILE* plFile = NULL; - - char version_number[20]; - char tmpBit[5] = ".bit"; - - int totalbits = 0; - int totalsmpls = 0; - int16_t testNum, testCE; - - FILE* fp_gns = NULL; - int gns = 0; - int cur_delay = 0; - char gns_file[100]; - - int nbTest = 0; - int16_t lostFrame; - float scale = (float)0.7; - /* only one structure used for ISAC encoder */ - ISACFIX_MainStruct* ISAC_main_inst = NULL; - - /* For fault test 10, garbage data */ - FILE* seedfile; - unsigned int random_seed = (unsigned int)time(NULL); // 1196764538 - - BottleNeckModel BN_data; - f_bn = NULL; - - readLoss = 0; - packetLossPercent = 0; - - /* Handling wrong input arguments in the command line */ - if ((argc < 3) || (argc > 22)) { - printf("\n\nWrong number of arguments or flag values.\n\n"); - - printf("\n"); - WebRtcIsacfix_version(version_number); - printf("iSAC version %s \n\n", version_number); - - printf("Usage:\n\n"); - printf("%s [-I] bottleneck_value infile outfile [-F num]\n\n", argv[0]); - printf("with:\n"); - printf("[-I] :if -I option is specified, the coder will use\n"); - printf(" an instantaneous Bottleneck value. 
If not, it\n"); - printf(" will be an adaptive Bottleneck value.\n\n"); - printf("bottleneck_value :the value of the bottleneck provided either\n"); - printf(" as a fixed value (e.g. 25000) or\n"); - printf(" read from a file (e.g. bottleneck.txt)\n\n"); - printf("infile :Normal speech input file\n\n"); - printf("outfile :Speech output file\n\n"); - printf( - "[-INITRATE num] :Set a new value for initial rate. Note! Only used" - " in adaptive mode.\n\n"); - printf( - "[-FL num] :Set (initial) frame length in msec. Valid length" - " are 30 and 60 msec.\n\n"); - printf("[-FIXED_FL] :Frame length to be fixed to initial value.\n\n"); - printf( - "[-MAX num] :Set the limit for the payload size of iSAC" - " in bytes. \n"); - printf(" Minimum 100, maximum 400.\n\n"); - printf("[-MAXRATE num] :Set the maxrate for iSAC in bits per second. \n"); - printf(" Minimum 32000, maximum 53400.\n\n"); - printf("[-F num] :if -F option is specified, the test function\n"); - printf( - " will run the iSAC API fault scenario specified" - " by the\n"); - printf(" supplied number.\n"); - printf(" F 1 - Call encoder prior to init encoder call\n"); - printf(" F 2 - Call decoder prior to init decoder call\n"); - printf(" F 3 - Call decoder prior to encoder call\n"); - printf( - " F 4 - Call decoder with a too short coded" - " sequence\n"); - printf( - " F 5 - Call decoder with a too long coded" - " sequence\n"); - printf(" F 6 - Call decoder with random bit stream\n"); - printf( - " F 7 - Call init encoder/decoder at random" - " during a call\n"); - printf( - " F 8 - Call encoder/decoder without having" - " allocated memory for \n"); - printf(" encoder/decoder instance\n"); - printf(" F 9 - Call decodeB without calling decodeA\n"); - printf(" F 10 - Call decodeB with garbage data\n"); - printf( - "[-PL num] :if -PL option is specified 0 encode with narrowband encoder" - " (infile is narrowband)\n"); - printf( - " num=2 => decode with narrowband decoder" - " (outfile is narrowband)\n\n"); - printf("[-CE num] :Test of APIs used by Conference Engine.\n"); - printf( - " CE 1 - createInternal, freeInternal," - " getNewBitstream \n"); - printf(" CE 2 - transcode, getBWE \n"); - printf(" CE 3 - getSendBWE, setSendBWE. \n\n"); - printf( - "[-RTP_INIT num] :if -RTP_INIT option is specified num will be" - " the initial\n"); - printf(" value of the rtp sequence number.\n\n"); - printf("[--isolated_script_test_perf_output=file]\n"); - printf( - " :If this option is specified, perf values will be" - " written to this file in a JSON format.\n\n"); - printf("Example usage :\n\n"); - printf("%s -I bottleneck.txt speechIn.pcm speechOut.pcm\n\n", argv[0]); - exit(1); - } - - /* Print version number */ - WebRtcIsacfix_version(version_number); - printf("iSAC version %s \n\n", version_number); - - /* Loop over all command line arguments */ - CodingMode = 0; - testNum = 0; - testCE = 0; - i = 1; - - /* Instantaneous mode */ - if (!strcmp("-I", argv[i])) { - printf("\nInstantaneous BottleNeck\n"); - CodingMode = 1; - i++; - } - - /* Bottleneck value is processed after the for */ - i++; - - /* Get Input and Output files */ - sscanf(argv[i++], "%s", inname); - sscanf(argv[i++], "%s", outname); - - for (; i < argc; i++) { - /* Set (initial) bottleneck value */ - if (!strcmp("-INITRATE", argv[i])) { - if (i + 1 >= argc) { - printf("-INITRATE requires a parameter.\n"); - exit(1); - } - rateBPS = atoi(argv[i + 1]); - setControlBWE = 1; - if ((rateBPS < 10000) || (rateBPS > 32000)) { - printf( - "\n%d is not a initial rate. 
" - "Valid values are in the range 10000 to 32000.\n", - rateBPS); - exit(1); - } - printf("\nNew initial rate: %d\n", rateBPS); - i++; - } - - /* Set (initial) framelength */ - if (!strcmp("-FL", argv[i])) { - if (i + 1 >= argc) { - printf("-FL requires a parameter.\n"); - exit(1); - } - framesize = atoi(argv[i + 1]); - if ((framesize != 30) && (framesize != 60)) { - printf( - "\n%d is not a valid frame length. " - "Valid length are 30 and 60 msec.\n", - framesize); - exit(1); - } - printf("\nFrame Length: %d\n", framesize); - i++; - } - - /* Fixed frame length */ - if (!strcmp("-FIXED_FL", argv[i])) { - fixedFL = 1; - setControlBWE = 1; - } - - /* Set maximum allowed payload size in bytes */ - if (!strcmp("-MAX", argv[i])) { - if (i + 1 >= argc) { - printf("-MAX requires a parameter.\n"); - exit(1); - } - payloadSize = atoi(argv[i + 1]); - printf("Maximum Payload Size: %d\n", payloadSize); - i++; - } - - /* Set maximum rate in bytes */ - if (!strcmp("-MAXRATE", argv[i])) { - if (i + 1 >= argc) { - printf("-MAXRATE requires a parameter.\n"); - exit(1); - } - payloadRate = atoi(argv[i + 1]); - printf("Maximum Rate in kbps: %d\n", payloadRate); - i++; - } - - /* Test of fault scenarious */ - if (!strcmp("-F", argv[i])) { - if (i + 1 >= argc) { - printf("-F requires a parameter."); - exit(1); - } - testNum = atoi(argv[i + 1]); - printf("\nFault test: %d\n", testNum); - if (testNum < 1 || testNum > 10) { - printf( - "\n%d is not a valid Fault Scenario number." - " Valid Fault Scenarios are numbered 1-10.\n", - testNum); - exit(1); - } - i++; - } - - /* Packet loss test */ - if (!strcmp("-PL", argv[i])) { - if (i + 1 >= argc) { - printf("-PL requires a parameter.\n"); - exit(1); - } - if (isdigit(*argv[i + 1])) { - packetLossPercent = atoi(argv[i + 1]); - if ((packetLossPercent < 0) | (packetLossPercent > 100)) { - printf("\nInvalid packet loss perentage \n"); - exit(1); - } - if (packetLossPercent > 0) { - printf("\nSimulating %d %% of independent packet loss\n", - packetLossPercent); - } else { - printf("\nNo Packet Loss Is Simulated \n"); - } - readLoss = 0; - } else { - readLoss = 1; - plFile = fopen(argv[i + 1], "rb"); - if (plFile == NULL) { - FAIL() << "Couldn't open the frameloss file: " << argv[i + 1]; - } - printf( - "\nSimulating packet loss through the given " - "channel file: %s\n", - argv[i + 1]); - } - i++; - } - - /* Random packetlosses */ - if (!strcmp("-rnd", argv[i])) { - srand(time(NULL)); - printf("\n Random pattern in lossed packets \n"); - } - - /* Use gns file */ - if (!strcmp("-G", argv[i])) { - if (i + 1 >= argc) { - printf("-G requires a parameter.\n"); - exit(1); - } - sscanf(argv[i + 1], "%s", gns_file); - fp_gns = fopen(gns_file, "rb"); - if (fp_gns == NULL) { - FAIL() << "Cannot read file " << gns_file << "."; - } - gns = 1; - i++; - } - - /* Run Narrowband interfaces (either encoder or decoder) */ - if (!strcmp("-NB", argv[i])) { - if (i + 1 >= argc) { - printf("-NB requires a parameter.\n"); - exit(1); - } - nbTest = atoi(argv[i + 1]); - i++; - } - - /* Run Conference Engine APIs */ - if (!strcmp("-CE", argv[i])) { - if (i + 1 >= argc) { - printf("-CE requires a parameter.\n"); - exit(1); - } - testCE = atoi(argv[i + 1]); - if (testCE == 1 || testCE == 2) { - i++; - scale = (float)atof(argv[i + 1]); - } else if (testCE < 1 || testCE > 3) { - printf( - "\n%d is not a valid CE-test number, valid Fault " - "Scenarios are numbered 1-3\n", - testCE); - exit(1); - } - i++; - } - - /* Set initial RTP number */ - if (!strcmp("-RTP_INIT", argv[i])) { - if (i + 1 >= 
argc) { - printf("-RTP_INIT requires a parameter.\n"); - exit(1); - } - i++; - } - - if (strstr(argv[i], "--isolated_script_test_perf_output") == argv[i]) { - const char* filename_start = strstr(argv[i], "="); - if (!filename_start || strlen(filename_start) < 2) { - printf("Expected --isolated_script_test_perf_output=/some/filename\n"); - exit(1); - } - perf_result_file = filename_start + 1; - } - } - - /* Get Bottleneck value */ - /* Gns files and bottleneck should not and can not be used simultaneously */ - bottleneck = atoi(argv[CodingMode + 1]); - if (bottleneck == 0 && gns == 0) { - sscanf(argv[CodingMode + 1], "%s", bottleneck_file); - f_bn = fopen(bottleneck_file, "rb"); - if (f_bn == NULL) { - printf("No value provided for BottleNeck\n"); - FAIL() << "Cannot read file " << bottleneck_file; - } else { - int aux_var; - printf("reading bottleneck rates from file %s\n\n", bottleneck_file); - if (fscanf(f_bn, "%d", &aux_var) == EOF) { - /* Set pointer to beginning of file */ - fseek(f_bn, 0L, SEEK_SET); - if (fscanf(f_bn, "%d", &aux_var) == EOF) { - FAIL(); - } - } - bottleneck = (int16_t)aux_var; - /* Bottleneck is a cosine function - * Matlab code for writing the bottleneck file: - * BottleNeck_10ms = 20e3 + 10e3 * cos((0:5999)/5999*2*pi); - * fid = fopen('bottleneck.txt', 'wb'); - * fprintf(fid, '%d\n', BottleNeck_10ms); fclose(fid); - */ - } - } else { - f_bn = NULL; - printf("\nfixed bottleneck rate of %d bits/s\n\n", bottleneck); - } - - if (CodingMode == 0) { - printf("\nAdaptive BottleNeck\n"); - } - - /* Add '.bit' to output bitstream file */ - while ((int)outname[h] != 0) { - outbitsname[h] = outname[h]; - h++; - } - for (k = 0; k < 5; k++) { - outbitsname[h] = tmpBit[k]; - h++; - } - if ((inp = fopen(inname, "rb")) == NULL) { - FAIL() << " iSAC: Cannot read file " << inname; - } - if ((outp = fopen(outname, "wb")) == NULL) { - FAIL() << " iSAC: Cannot write file " << outname; - } - - if ((outbits = fopen(outbitsname, "wb")) == NULL) { - FAIL() << " iSAC: Cannot write file " << outbitsname; - } - printf("\nInput:%s\nOutput:%s\n\n", inname, outname); - - /* Error test number 10, garbage data */ - if (testNum == 10) { - /* Test to run decoder with garbage data */ - srand(random_seed); - - if ((seedfile = fopen(SEED_FILE, "a+t")) == NULL) { - printf("Error: Could not open file %s\n", SEED_FILE); - } else { - fprintf(seedfile, "%u\n", random_seed); - fclose(seedfile); - } - } - - /* Runtime statistics */ - starttime = clock() / (double)CLOCKS_PER_SEC; - - /* Initialize the ISAC and BN structs */ - if (testNum != 8) { - err = WebRtcIsacfix_Create(&ISAC_main_inst); - - /* Error check */ - if (err < 0) { - printf("\n\n Error in create.\n\n"); - } - if (testCE == 1) { - err = WebRtcIsacfix_CreateInternal(ISAC_main_inst); - /* Error check */ - if (err < 0) { - printf("\n\n Error in createInternal.\n\n"); - } - } - } - - /* Init of bandwidth data */ - BN_data.send_time = 0; - BN_data.arrival_time = 0; - BN_data.sample_count = 0; - BN_data.rtp_number = 0; - - /* Initialize encoder and decoder */ - framecnt = 0; - endfile = 0; - if (testNum != 1) { - WebRtcIsacfix_EncoderInit(ISAC_main_inst, CodingMode); - } - if (testNum != 2) { - WebRtcIsacfix_DecoderInit(ISAC_main_inst); - } - - if (CodingMode == 1) { - err = WebRtcIsacfix_Control(ISAC_main_inst, bottleneck, framesize); - if (err < 0) { - /* exit if returned with error */ - errtype = WebRtcIsacfix_GetErrorCode(ISAC_main_inst); - printf("\n\n Error in control: %d.\n\n", errtype); - } - } else if (setControlBWE == 1) { - err = 
WebRtcIsacfix_ControlBwe(ISAC_main_inst, rateBPS, framesize, fixedFL); - } - - if (payloadSize != 0) { - err = WebRtcIsacfix_SetMaxPayloadSize(ISAC_main_inst, payloadSize); - if (err < 0) { - /* exit if returned with error */ - errtype = WebRtcIsacfix_GetErrorCode(ISAC_main_inst); - FAIL() << "Error in SetMaxPayloadSize: " << errtype; - } - } - if (payloadRate != 0) { - err = WebRtcIsacfix_SetMaxRate(ISAC_main_inst, payloadRate); - if (err < 0) { - /* exit if returned with error */ - errtype = WebRtcIsacfix_GetErrorCode(ISAC_main_inst); - FAIL() << "Error in SetMaxRateInBytes: " << errtype; - } - } - - *speechType = 1; - - while (endfile == 0) { - if (testNum == 7 && (rand() % 2 == 0)) { - err = WebRtcIsacfix_EncoderInit(ISAC_main_inst, CodingMode); - /* Error check */ - if (err < 0) { - errtype = WebRtcIsacfix_GetErrorCode(ISAC_main_inst); - printf("\n\n Error in encoderinit: %d.\n\n", errtype); - } - - WebRtcIsacfix_DecoderInit(ISAC_main_inst); - } - - cur_framesmpls = 0; - while (1) { - /* Read 10 ms speech block */ - if (nbTest != 1) { - endfile = readframe(shortdata, inp, FRAMESAMPLES_10ms); - } else { - endfile = readframe(shortdata, inp, (FRAMESAMPLES_10ms / 2)); - } - - if (testNum == 7) { - srand(time(NULL)); - } - - /* iSAC encoding */ - if (!(testNum == 3 && framecnt == 0)) { - if (nbTest != 1) { - short bwe; - - /* Encode */ - stream_len_int = WebRtcIsacfix_Encode(ISAC_main_inst, shortdata, - (uint8_t*)streamdata); - - /* If packet is ready, and CE testing, call the different API - functions from the internal API. */ - if (stream_len_int > 0) { - if (testCE == 1) { - err = WebRtcIsacfix_ReadBwIndex( - reinterpret_cast(streamdata), - static_cast(stream_len_int), &bwe); - stream_len_int = WebRtcIsacfix_GetNewBitStream( - ISAC_main_inst, bwe, scale, - reinterpret_cast(streamdata)); - } else if (testCE == 2) { - /* transcode function not supported */ - } else if (testCE == 3) { - /* Only for Function testing. 
The functions should normally - not be used in this way */ - - err = WebRtcIsacfix_GetDownLinkBwIndex(ISAC_main_inst, &bwe); - /* Error Check */ - if (err < 0) { - errtype = WebRtcIsacfix_GetErrorCode(ISAC_main_inst); - printf("\nError in getSendBWE: %d.\n", errtype); - } - - err = WebRtcIsacfix_UpdateUplinkBw(ISAC_main_inst, bwe); - /* Error Check */ - if (err < 0) { - errtype = WebRtcIsacfix_GetErrorCode(ISAC_main_inst); - printf("\nError in setBWE: %d.\n", errtype); - } - } - } - } else { - stream_len_int = -1; - } - } else { - break; - } - - if (stream_len_int < 0 || err < 0) { - /* exit if returned with error */ - errtype = WebRtcIsacfix_GetErrorCode(ISAC_main_inst); - printf("\nError in encoder: %d.\n", errtype); - } else { - stream_len = static_cast(stream_len_int); - if (fwrite(streamdata, sizeof(char), stream_len, outbits) != - stream_len) { - FAIL(); - } - } - - cur_framesmpls += FRAMESAMPLES_10ms; - - /* read next bottleneck rate */ - if (f_bn != NULL) { - int aux_var; - if (fscanf(f_bn, "%d", &aux_var) == EOF) { - /* Set pointer to beginning of file */ - fseek(f_bn, 0L, SEEK_SET); - if (fscanf(f_bn, "%d", &aux_var) == EOF) { - FAIL(); - } - } - bottleneck = (int16_t)aux_var; - if (CodingMode == 1) { - WebRtcIsacfix_Control(ISAC_main_inst, bottleneck, framesize); - } - } - - /* exit encoder loop if the encoder returned a bitstream */ - if (stream_len != 0) - break; - } - - /* make coded sequence to short be inreasing */ - /* the length the decoder expects */ - if (testNum == 4) { - stream_len += 10; - } - - /* make coded sequence to long be decreasing */ - /* the length the decoder expects */ - if (testNum == 5) { - stream_len -= 10; - } - - if (testNum == 6) { - srand(time(NULL)); - for (i = 0; i < static_cast(stream_len); i++) { - streamdata[i] = rand(); - } - } - - /* set pointer to beginning of file */ - if (fp_gns != NULL) { - if (fscanf(fp_gns, "%d", &cur_delay) == EOF) { - fseek(fp_gns, 0L, SEEK_SET); - if (fscanf(fp_gns, "%d", &cur_delay) == EOF) { - FAIL(); - } - } - } - - /* simulate packet handling through NetEq and the modem */ - if (!(testNum == 3 && framecnt == 0)) { - if (gns == 0) { - get_arrival_time(cur_framesmpls, stream_len, bottleneck, &BN_data); - } else { - get_arrival_time2(cur_framesmpls, cur_delay, &BN_data); - } - } - - /* packet not dropped */ - if (cur_delay != -1) { - /* Error test number 10, garbage data */ - if (testNum == 10) { - for (i = 0; i < static_cast(stream_len); i++) { - streamdata[i] = (short)(streamdata[i] + (short)rand()); - } - } - - if (testNum != 9) { - err = WebRtcIsacfix_UpdateBwEstimate( - ISAC_main_inst, reinterpret_cast(streamdata), - stream_len, BN_data.rtp_number, BN_data.send_time, - BN_data.arrival_time); - - if (err < 0) { - /* exit if returned with error */ - errtype = WebRtcIsacfix_GetErrorCode(ISAC_main_inst); - printf("\nError in decoder: %d.\n", errtype); - } - } - - if (readLoss == 1) { - if (fread(&lostFrame, sizeof(int16_t), 1, plFile) != 1) { - rewind(plFile); - } - lostFrame = !lostFrame; - } else { - lostFrame = (rand() % 100 < packetLossPercent); - } - - /* iSAC decoding */ - if (lostFrame && framecnt > 0) { - if (nbTest != 2) { - declen = static_cast( - WebRtcIsacfix_DecodePlc(ISAC_main_inst, decoded, prevFrameSize)); - } else { - declen = -1; - } - lostPackets++; - } else { - if (nbTest != 2) { - size_t FL; - /* Call getFramelen, only used here for function test */ - err = WebRtcIsacfix_ReadFrameLen( - reinterpret_cast(streamdata), stream_len, &FL); - declen = WebRtcIsacfix_Decode( - ISAC_main_inst, 
reinterpret_cast(streamdata), - stream_len, decoded, speechType); - /* Error check */ - if (err < 0 || declen < 0 || FL != static_cast(declen)) { - errtype = WebRtcIsacfix_GetErrorCode(ISAC_main_inst); - printf( - "\nError %d in ReadFrameLen (%s), Decode (%s), with FL %zu and " - "declen %d.\n", - errtype, err < 0 ? "yes" : "no", declen < 0 ? "yes" : "no", FL, - declen); - } - prevFrameSize = static_cast(declen / 480); - - } else { - declen = -1; - prevFrameSize = static_cast(declen / 240); - } - } - - if (declen <= 0) { - /* exit if returned with error */ - errtype = WebRtcIsacfix_GetErrorCode(ISAC_main_inst); - printf("\nError in decoder: %d.\n", errtype); - } - - /* Write decoded speech frame to file */ - if (fwrite(decoded, sizeof(int16_t), declen, outp) != (size_t)declen) { - FAIL(); - } - // fprintf( ratefile, "%f \n", stream_len / ( ((double)declen)/ - // ((double)FS) ) * 8 ); - } else { - lostPackets++; - } - framecnt++; - - totalsmpls += declen; - totalbits += static_cast(8 * stream_len); - - /* Error test number 10, garbage data */ - if (testNum == 10) { - if ((seedfile = fopen(SEED_FILE, "a+t")) == NULL) { - printf("Error: Could not open file %s\n", SEED_FILE); - } else { - fprintf(seedfile, "ok\n\n"); - fclose(seedfile); - } - } - } - printf("\nLost Frames %d ~ %4.1f%%\n", lostPackets, - (double)lostPackets / (double)framecnt * 100.0); - printf("\n\ntotal bits = %d bits", totalbits); - printf("\nmeasured average bitrate = %0.3f kbits/s", - (double)totalbits * (FS / 1000) / totalsmpls); - printf("\n"); - - /* Runtime statistics */ - - runtime = (double)(((double)clock() / (double)CLOCKS_PER_SEC) - starttime); - length_file = ((double)framecnt * (double)declen / FS); - printf("\n\nLength of speech file: %.1f s\n", length_file); - printf("Time to run iSAC: %.2f s (%.2f %% of realtime)\n\n", runtime, - (100 * runtime / length_file)); - printf("\n\n_______________________________________________\n"); - - // Record the results with Perf test tools. - webrtc::test::PrintResult("isac", "", "time_per_10ms_frame", - (runtime * 10) / length_file, "ms", false); - - if (perf_result_file) { - EXPECT_TRUE(webrtc::test::WritePerfResults(perf_result_file)); - } - - fclose(inp); - fclose(outp); - fclose(outbits); - - if (testCE == 1) { - WebRtcIsacfix_FreeInternal(ISAC_main_inst); - } - WebRtcIsacfix_Free(ISAC_main_inst); -} - -int main(int argc, char* argv[]) { - ::testing::InitGoogleTest(&argc, argv); - global_argc = argc; - global_argv = argv; - - return RUN_ALL_TESTS(); -} diff --git a/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc b/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc index a2e1e088e6..cafca75e46 100644 --- a/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc +++ b/modules/audio_coding/codecs/isac/isac_webrtc_api_test.cc @@ -51,8 +51,8 @@ std::unique_ptr GetPcmTestFileReader(int sample_rate_hz) { filename = test::ResourcePath("audio_coding/testfile32kHz", "pcm"); break; default: - RTC_NOTREACHED() << "No test file available for " << sample_rate_hz - << " Hz."; + RTC_DCHECK_NOTREACHED() + << "No test file available for " << sample_rate_hz << " Hz."; } auto pcm_file = std::make_unique(); pcm_file->ReadStereo(false); @@ -183,8 +183,8 @@ TEST_P(EncoderTest, TestDynamicBitrateChange) { } } // kHighBps / kLowBps == 1.25, so require the high-bitrate run to produce at - // least 1.2 times the number of bytes. - EXPECT_LT(1.2 * num_bytes[kLowBps], num_bytes[kHighBps]); + // least 1.195 times the number of bytes. 
+ EXPECT_LT(1.195 * num_bytes[kLowBps], num_bytes[kHighBps]); } // Checks that, given a target bitrate, the encoder does not overshoot too much. @@ -203,10 +203,10 @@ TEST_P(EncoderTest, DoNotOvershootTargetBitrate) { e->Encode(/*rtp_timestamp=*/0, AudioFrameToView(in), &encoded); num_bytes += encoded.size(); } - // Inverse of the duration of |kNumFrames| 10 ms frames (unit: seconds^-1). + // Inverse of the duration of `kNumFrames` 10 ms frames (unit: seconds^-1). constexpr float kAudioDurationInv = 100.f / kNumFrames; const int measured_bitrate_bps = 8 * num_bytes * kAudioDurationInv; - EXPECT_LT(measured_bitrate_bps, bitrate_bps + 2000); // Max 2 kbps extra. + EXPECT_LT(measured_bitrate_bps, bitrate_bps + 2250); // Max 2250 bps extra. } } diff --git a/modules/audio_coding/codecs/isac/main/include/isac.h b/modules/audio_coding/codecs/isac/main/include/isac.h index 3d2caefc23..f45bbb3897 100644 --- a/modules/audio_coding/codecs/isac/main/include/isac.h +++ b/modules/audio_coding/codecs/isac/main/include/isac.h @@ -606,7 +606,7 @@ int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst, int16_t* decoded, int16_t* speechType); -/* If |inst| is a decoder but not an encoder: tell it what sample rate the +/* If `inst` is a decoder but not an encoder: tell it what sample rate the encoder is using, for bandwidth estimation purposes. */ void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst, int sample_rate_hz); diff --git a/modules/audio_coding/codecs/isac/main/source/arith_routines.h b/modules/audio_coding/codecs/isac/main/source/arith_routines.h index 6e7ea1da5e..3f9f6de7bb 100644 --- a/modules/audio_coding/codecs/isac/main/source/arith_routines.h +++ b/modules/audio_coding/codecs/isac/main/source/arith_routines.h @@ -24,9 +24,9 @@ int WebRtcIsac_EncLogisticMulti2( Bitstr* streamdata, /* in-/output struct containing bitstream */ int16_t* dataQ7, /* input: data vector */ const uint16_t* - env, /* input: side info vector defining the width of the pdf */ - const int N, /* input: data vector length */ - const int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */ + env, /* input: side info vector defining the width of the pdf */ + int N, /* input: data vector length */ + int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */ /* returns the number of bytes in the stream */ int WebRtcIsac_EncTerminate( @@ -38,15 +38,15 @@ int WebRtcIsac_DecLogisticMulti2( Bitstr* streamdata, /* in-/output struct containing bitstream */ const uint16_t* env, /* input: side info vector defining the width of the pdf */ - const int16_t* dither, /* input: dither vector */ - const int N, /* input: data vector length */ - const int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */ + const int16_t* dither, /* input: dither vector */ + int N, /* input: data vector length */ + int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */ void WebRtcIsac_EncHistMulti( Bitstr* streamdata, /* in-/output struct containing bitstream */ const int* data, /* input: data vector */ const uint16_t* const* cdf, /* input: array of cdf arrays */ - const int N); /* input: data vector length */ + int N); /* input: data vector length */ int WebRtcIsac_DecHistBisectMulti( int* data, /* output: data vector */ @@ -54,7 +54,7 @@ int WebRtcIsac_DecHistBisectMulti( const uint16_t* const* cdf, /* input: array of cdf arrays */ const uint16_t* cdf_size, /* input: array of cdf table sizes+1 (power of two: 2^k) */ - const int N); /* input: data vector length */ + int N); /* input: data vector length */ 
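+ int N); /* input: data vector length */

The `const int N` -> `int N` changes in this header (and in `bandwidth_estimator.h` below) are purely cosmetic for callers: top-level `const` on a by-value parameter is not part of a function's type. A minimal illustration (hypothetical function, not from the tree):

// Both lines declare the same function; top-level const on a by-value
// parameter does not participate in the signature.
int SumTwice(const int n);
int SumTwice(int n);  // redeclaration, not an overload

// const on a value parameter only matters in a definition, where it makes
// the function's local copy read-only.
int SumTwice(int n) {
  // n = 0;  // would be an error only if this definition said `const int n`
  return n + n;
}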
int WebRtcIsac_DecHistOneStepMulti( int* data, /* output: data vector */ @@ -62,6 +62,6 @@ int WebRtcIsac_DecHistOneStepMulti( const uint16_t* const* cdf, /* input: array of cdf arrays */ const uint16_t* init_index, /* input: vector of initial cdf table search entries */ - const int N); /* input: data vector length */ + int N); /* input: data vector length */ #endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_ */ diff --git a/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h b/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h index 221e65ff3f..5f4550a3a5 100644 --- a/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h +++ b/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h @@ -92,11 +92,11 @@ int32_t WebRtcIsac_InitBandwidthEstimator( * estimated by other side */ /* returns 0 if everything went fine, -1 otherwise */ int16_t WebRtcIsac_UpdateBandwidthEstimator(BwEstimatorstr* bwest_str, - const uint16_t rtp_number, - const int32_t frame_length, - const uint32_t send_ts, - const uint32_t arr_ts, - const size_t pksize); + uint16_t rtp_number, + int32_t frame_length, + uint32_t send_ts, + uint32_t arr_ts, + size_t pksize); /* Update receiving estimates. Used when we only receive BWE index, no iSAC data * packet. */ @@ -131,10 +131,10 @@ int32_t WebRtcIsac_GetUplinkMaxDelay(const BwEstimatorstr* bwest_str); */ int WebRtcIsac_GetMinBytes( RateModel* State, - int StreamSize, /* bytes in bitstream */ - const int FrameLen, /* ms per frame */ - const double BottleNeck, /* bottle neck rate; excl headers (bps) */ - const double DelayBuildUp, /* max delay from bottleneck buffering (ms) */ + int StreamSize, /* bytes in bitstream */ + int FrameLen, /* ms per frame */ + double BottleNeck, /* bottle neck rate; excl headers (bps) */ + double DelayBuildUp, /* max delay from bottleneck buffering (ms) */ enum ISACBandwidth bandwidth /*,int16_t frequentLargePackets*/); @@ -143,9 +143,9 @@ int WebRtcIsac_GetMinBytes( */ void WebRtcIsac_UpdateRateModel( RateModel* State, - int StreamSize, /* bytes in bitstream */ - const int FrameSamples, /* samples per frame */ - const double BottleNeck); /* bottle neck rate; excl headers (bps) */ + int StreamSize, /* bytes in bitstream */ + int FrameSamples, /* samples per frame */ + double BottleNeck); /* bottle neck rate; excl headers (bps) */ void WebRtcIsac_InitRateModel(RateModel* State); diff --git a/modules/audio_coding/codecs/isac/main/source/entropy_coding.c b/modules/audio_coding/codecs/isac/main/source/entropy_coding.c index 6692a519ca..188c8f6b86 100644 --- a/modules/audio_coding/codecs/isac/main/source/entropy_coding.c +++ b/modules/audio_coding/codecs/isac/main/source/entropy_coding.c @@ -1446,7 +1446,7 @@ void WebRtcIsac_EncodeRc(int16_t* RCQ15, Bitstr* streamdata) { index[k] = WebRtcIsac_kQArRcInitIndex[k]; // The safe-guards in following while conditions are to suppress gcc 4.8.3 // warnings, Issue 2888. Otherwise, first and last elements of - // |WebRtcIsac_kQArBoundaryLevels| are such that the following search + // `WebRtcIsac_kQArBoundaryLevels` are such that the following search // *never* cause an out-of-boundary read. 
if (RCQ15[k] > WebRtcIsac_kQArBoundaryLevels[index[k]]) { while (index[k] + 1 < NUM_AR_RC_QUANT_BAUNDARY && diff --git a/modules/audio_coding/codecs/isac/main/source/pitch_filter.c b/modules/audio_coding/codecs/isac/main/source/pitch_filter.c index 61cd533ab3..bf03dfff2e 100644 --- a/modules/audio_coding/codecs/isac/main/source/pitch_filter.c +++ b/modules/audio_coding/codecs/isac/main/source/pitch_filter.c @@ -25,8 +25,8 @@ * Post-filtering: * y(z) = x(z) - damper(z) * gain * (x(z) + y(z)) * z ^ (-lag); * - * Note that |lag| is a floating number so we perform an interpolation to - * obtain the correct |lag|. + * Note that `lag` is a floating number so we perform an interpolation to + * obtain the correct `lag`. * */ @@ -86,7 +86,7 @@ typedef enum { * buffer : a buffer where the sum of previous inputs and outputs * are stored. * damper_state : the state of the damping filter. The filter is defined by - * |kDampFilter|. + * `kDampFilter`. * interpol_coeff : pointer to a set of coefficient which are used to utilize * fractional pitch by interpolation. * gain : pitch-gain to be applied to the current segment of input. @@ -140,9 +140,9 @@ static void FilterSegment(const double* in_data, PitchFilterParam* parameters, int j; double sum; double sum2; - /* Index of |parameters->buffer| where the output is written to. */ + /* Index of `parameters->buffer` where the output is written to. */ int pos = parameters->index + PITCH_BUFFSIZE; - /* Index of |parameters->buffer| where samples are read for fractional-lag + /* Index of `parameters->buffer` where samples are read for fractional-lag * computation. */ int pos_lag = pos - parameters->lag_offset; @@ -174,9 +174,9 @@ static void FilterSegment(const double* in_data, PitchFilterParam* parameters, /* Filter for fractional pitch. */ sum2 = 0.0; for (m = PITCH_FRACORDER-1; m >= m_tmp; --m) { - /* |lag_index + m| is always larger than or equal to zero, see how + /* `lag_index + m` is always larger than or equal to zero, see how * m_tmp is computed. This is equivalent to assume samples outside - * |out_dg[j]| are zero. */ + * `out_dg[j]` are zero. */ sum2 += out_dg[j][lag_index + m] * parameters->interpol_coeff[m]; } /* Add the contribution of differential gain change. */ @@ -353,7 +353,7 @@ static void FilterFrame(const double* in_data, PitchFiltstr* filter_state, if ((mode == kPitchFilterPreGain) || (mode == kPitchFilterPreLa)) { /* Filter the lookahead segment, this is treated as the last sub-frame. So - * set |pf_param| to last sub-frame. */ + * set `pf_param` to last sub-frame. */ filter_parameters.sub_frame = PITCH_SUBFRAMES - 1; filter_parameters.num_samples = QLOOKAHEAD; FilterSegment(in_data, &filter_parameters, out_data, out_dg); diff --git a/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc b/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc index d9efc211da..dacf325082 100644 --- a/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc +++ b/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc @@ -59,7 +59,7 @@ std::vector LegacyEncodedAudioFrame::SplitBySamples( new LegacyEncodedAudioFrame(decoder, std::move(payload))); results.emplace_back(timestamp, 0, std::move(frame)); } else { - // Reduce the split size by half as long as |split_size_bytes| is at least + // Reduce the split size by half as long as `split_size_bytes` is at least // twice the minimum chunk size (so that the resulting size is at least as // large as the minimum chunk size). 
while (split_size_bytes >= 2 * min_chunk_size) { diff --git a/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc b/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc index f081a5380f..f81aeeea80 100644 --- a/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc +++ b/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc @@ -88,7 +88,7 @@ class SplitBySamplesTest : public ::testing::TestWithParam { samples_per_ms_ = 8; break; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; } } @@ -138,7 +138,6 @@ TEST_P(SplitBySamplesTest, PayloadSizes) { EXPECT_EQ(expected_split.num_frames, results.size()); uint32_t expected_timestamp = kBaseTimestamp; - uint32_t expected_byte_offset = 0; uint8_t value = 0; for (size_t i = 0; i != expected_split.num_frames; ++i) { const auto& result = results[i]; @@ -155,7 +154,6 @@ TEST_P(SplitBySamplesTest, PayloadSizes) { expected_timestamp += rtc::checked_cast( expected_split.frame_sizes[i] * samples_per_ms_); - expected_byte_offset += rtc::checked_cast(length_bytes); } } } diff --git a/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.cc b/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.cc index c8fd176fbb..285ea89959 100644 --- a/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.cc +++ b/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.cc @@ -26,6 +26,7 @@ std::unique_ptr AudioDecoderMultiChannelOpusImpl::MakeAudioDecoder( AudioDecoderMultiChannelOpusConfig config) { if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); return nullptr; } // Fill the pointer with a working decoder through the C interface. This @@ -78,6 +79,9 @@ AudioDecoderMultiChannelOpusImpl::SdpToConfig(const SdpAudioFormat& format) { return absl::nullopt; } config.channel_mapping = *channel_mapping; + if (!config.IsOk()) { + return absl::nullopt; + } return config; } diff --git a/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h b/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h index efc3f0dda8..2ff47a8a53 100644 --- a/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h +++ b/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h @@ -21,7 +21,6 @@ #include "api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h" #include "modules/audio_coding/codecs/opus/opus_interface.h" #include "rtc_base/buffer.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -32,6 +31,11 @@ class AudioDecoderMultiChannelOpusImpl final : public AudioDecoder { ~AudioDecoderMultiChannelOpusImpl() override; + AudioDecoderMultiChannelOpusImpl(const AudioDecoderMultiChannelOpusImpl&) = + delete; + AudioDecoderMultiChannelOpusImpl& operator=( + const AudioDecoderMultiChannelOpusImpl&) = delete; + std::vector ParsePayload(rtc::Buffer&& payload, uint32_t timestamp) override; void Reset() override; @@ -63,7 +67,6 @@ class AudioDecoderMultiChannelOpusImpl final : public AudioDecoder { OpusDecInst* dec_state_; const AudioDecoderMultiChannelOpusConfig config_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderMultiChannelOpusImpl); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_unittest.cc b/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_unittest.cc index 66eecb758e..57e2107f3c 100644 --- a/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_unittest.cc +++ 
b/modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_unittest.cc @@ -45,13 +45,7 @@ TEST(AudioDecoderMultiOpusTest, InvalidChannelMappings) { {"num_streams", "2"}}); const absl::optional decoder_config = AudioDecoderMultiChannelOpus::SdpToConfig(sdp_format); - ASSERT_TRUE(decoder_config.has_value()); - EXPECT_FALSE(decoder_config->IsOk()); - - const std::unique_ptr opus_decoder = - AudioDecoderMultiChannelOpus::MakeAudioDecoder(*decoder_config); - - EXPECT_FALSE(opus_decoder); + EXPECT_FALSE(decoder_config.has_value()); } { // The mapping is too long. There are only 5 channels, but 6 elements in the @@ -62,13 +56,7 @@ TEST(AudioDecoderMultiOpusTest, InvalidChannelMappings) { {"num_streams", "2"}}); const absl::optional decoder_config = AudioDecoderMultiChannelOpus::SdpToConfig(sdp_format); - ASSERT_TRUE(decoder_config.has_value()); - EXPECT_FALSE(decoder_config->IsOk()); - - const std::unique_ptr opus_decoder = - AudioDecoderMultiChannelOpus::MakeAudioDecoder(*decoder_config); - - EXPECT_FALSE(opus_decoder); + EXPECT_FALSE(decoder_config.has_value()); } { // The mapping doesn't parse correctly. diff --git a/modules/audio_coding/codecs/opus/audio_decoder_opus.h b/modules/audio_coding/codecs/opus/audio_decoder_opus.h index c79272284d..e8fd0440bc 100644 --- a/modules/audio_coding/codecs/opus/audio_decoder_opus.h +++ b/modules/audio_coding/codecs/opus/audio_decoder_opus.h @@ -19,7 +19,6 @@ #include "api/audio_codecs/audio_decoder.h" #include "modules/audio_coding/codecs/opus/opus_interface.h" #include "rtc_base/buffer.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -29,6 +28,9 @@ class AudioDecoderOpusImpl final : public AudioDecoder { int sample_rate_hz = 48000); ~AudioDecoderOpusImpl() override; + AudioDecoderOpusImpl(const AudioDecoderOpusImpl&) = delete; + AudioDecoderOpusImpl& operator=(const AudioDecoderOpusImpl&) = delete; + std::vector ParsePayload(rtc::Buffer&& payload, uint32_t timestamp) override; void Reset() override; @@ -55,7 +57,6 @@ class AudioDecoderOpusImpl final : public AudioDecoder { OpusDecInst* dec_state_; const size_t channels_; const int sample_rate_hz_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderOpusImpl); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.cc b/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.cc index 1feef3d359..38a11c123d 100644 --- a/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.cc +++ b/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.cc @@ -131,6 +131,7 @@ AudioEncoderMultiChannelOpusImpl::MakeAudioEncoder( const AudioEncoderMultiChannelOpusConfig& config, int payload_type) { if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); return nullptr; } return std::make_unique(config, @@ -280,6 +281,9 @@ AudioEncoderMultiChannelOpusImpl::SdpToConfig(const SdpAudioFormat& format) { } config.channel_mapping = *channel_mapping; + if (!config.IsOk()) { + return absl::nullopt; + } return config; } diff --git a/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h b/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h index eadb4a6eb9..8a7210515c 100644 --- a/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h +++ b/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h @@ -21,7 +21,6 @@ #include "api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.h" #include "api/units/time_delta.h" #include 
"modules/audio_coding/codecs/opus/opus_interface.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -34,6 +33,11 @@ class AudioEncoderMultiChannelOpusImpl final : public AudioEncoder { int payload_type); ~AudioEncoderMultiChannelOpusImpl() override; + AudioEncoderMultiChannelOpusImpl(const AudioEncoderMultiChannelOpusImpl&) = + delete; + AudioEncoderMultiChannelOpusImpl& operator=( + const AudioEncoderMultiChannelOpusImpl&) = delete; + // Static interface for use by BuiltinAudioEncoderFactory. static constexpr const char* GetPayloadName() { return "multiopus"; } static absl::optional QueryAudioEncoder( @@ -81,7 +85,6 @@ class AudioEncoderMultiChannelOpusImpl final : public AudioEncoder { int next_frame_length_ms_; friend struct AudioEncoderMultiChannelOpus; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderMultiChannelOpusImpl); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_unittest.cc b/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_unittest.cc index 44da7d7ffd..92f6f2c169 100644 --- a/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_unittest.cc +++ b/modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_unittest.cc @@ -28,10 +28,9 @@ TEST(AudioEncoderMultiOpusTest, CheckConfigValidity) { {"num_streams", "2"}}); const absl::optional encoder_config = AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format); - ASSERT_TRUE(encoder_config.has_value()); // Maps input channel 0 to coded channel 3, which doesn't exist. - EXPECT_FALSE(encoder_config->IsOk()); + EXPECT_FALSE(encoder_config.has_value()); } { @@ -41,10 +40,9 @@ TEST(AudioEncoderMultiOpusTest, CheckConfigValidity) { {"num_streams", "2"}}); const absl::optional encoder_config = AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format); - ASSERT_TRUE(encoder_config.has_value()); // The mapping is too short. - EXPECT_FALSE(encoder_config->IsOk()); + EXPECT_FALSE(encoder_config.has_value()); } { const SdpAudioFormat sdp_format("multiopus", 48000, 3, @@ -53,10 +51,9 @@ TEST(AudioEncoderMultiOpusTest, CheckConfigValidity) { {"num_streams", "1"}}); const absl::optional encoder_config = AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format); - ASSERT_TRUE(encoder_config.has_value()); // Coded channel 0 comes from both input channels 0, 1 and 2. - EXPECT_FALSE(encoder_config->IsOk()); + EXPECT_FALSE(encoder_config.has_value()); } { const SdpAudioFormat sdp_format("multiopus", 48000, 3, @@ -77,11 +74,10 @@ TEST(AudioEncoderMultiOpusTest, CheckConfigValidity) { {"num_streams", "2"}}); const absl::optional encoder_config = AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format); - ASSERT_TRUE(encoder_config.has_value()); // This is NOT fine, because channels nothing says how coded channel 1 // should be coded. - EXPECT_FALSE(encoder_config->IsOk()); + EXPECT_FALSE(encoder_config.has_value()); } } @@ -105,7 +101,7 @@ TEST(AudioEncoderMultiOpusTest, ConfigValuesAreParsedCorrectly) { testing::ContainerEq(std::vector({0, 4, 1, 2, 3, 5}))); } -TEST(AudioEncoderMultiOpusTest, CreateFromValidOrInvalidConfig) { +TEST(AudioEncoderMultiOpusTest, CreateFromValidConfig) { { const SdpAudioFormat sdp_format("multiopus", 48000, 3, {{"channel_mapping", "0,255,255"}, @@ -113,19 +109,7 @@ TEST(AudioEncoderMultiOpusTest, CreateFromValidOrInvalidConfig) { {"num_streams", "2"}}); const absl::optional encoder_config = AudioEncoderMultiChannelOpus::SdpToConfig(sdp_format); - ASSERT_TRUE(encoder_config.has_value()); - - // Invalid config from the ConfigValidity test. 
It's not allowed by our - // checks, but Opus is more forgiving. - EXPECT_FALSE(encoder_config->IsOk()); - - const std::unique_ptr opus_encoder = - AudioEncoderMultiChannelOpus::MakeAudioEncoder(*encoder_config, - kOpusPayloadType); - - // Shouldn't be possible (but shouldn't result in a crash) to create an - // Encoder from an invalid config. - EXPECT_FALSE(opus_encoder); + ASSERT_FALSE(encoder_config.has_value()); } { const SdpAudioFormat sdp_format("multiopus", 48000, 3, diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc index 7c62e98c5b..fe6d85263a 100644 --- a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc +++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc @@ -229,7 +229,10 @@ AudioCodecInfo AudioEncoderOpusImpl::QueryAudioEncoder( std::unique_ptr AudioEncoderOpusImpl::MakeAudioEncoder( const AudioEncoderOpusConfig& config, int payload_type) { - RTC_DCHECK(config.IsOk()); + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return nullptr; + } return std::make_unique(config, payload_type); } @@ -268,7 +271,10 @@ absl::optional AudioEncoderOpusImpl::SdpToConfig( FindSupportedFrameLengths(min_frame_length_ms, max_frame_length_ms, &config.supported_frame_lengths_ms); - RTC_DCHECK(config.IsOk()); + if (!config.IsOk()) { + RTC_DCHECK_NOTREACHED(); + return absl::nullopt; + } return config; } @@ -564,9 +570,9 @@ void AudioEncoderOpusImpl::OnReceivedOverhead( void AudioEncoderOpusImpl::SetReceiverFrameLengthRange( int min_frame_length_ms, int max_frame_length_ms) { - // Ensure that |SetReceiverFrameLengthRange| is called before - // |EnableAudioNetworkAdaptor|, otherwise we need to recreate - // |audio_network_adaptor_|, which is not a needed use case. + // Ensure that `SetReceiverFrameLengthRange` is called before + // `EnableAudioNetworkAdaptor`, otherwise we need to recreate + // `audio_network_adaptor_`, which is not a needed use case. RTC_DCHECK(!audio_network_adaptor_); FindSupportedFrameLengths(min_frame_length_ms, max_frame_length_ms, &config_.supported_frame_lengths_ms); diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.h b/modules/audio_coding/codecs/opus/audio_encoder_opus.h index ab954feba7..14477cc317 100644 --- a/modules/audio_coding/codecs/opus/audio_encoder_opus.h +++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.h @@ -23,7 +23,6 @@ #include "common_audio/smoothing_filter.h" #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h" #include "modules/audio_coding/codecs/opus/opus_interface.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -61,6 +60,9 @@ class AudioEncoderOpusImpl final : public AudioEncoder { AudioEncoderOpusImpl(int payload_type, const SdpAudioFormat& format); ~AudioEncoderOpusImpl() override; + AudioEncoderOpusImpl(const AudioEncoderOpusImpl&) = delete; + AudioEncoderOpusImpl& operator=(const AudioEncoderOpusImpl&) = delete; + int SampleRateHz() const override; size_t NumChannels() const override; int RtpTimestampRateHz() const override; @@ -139,7 +141,7 @@ class AudioEncoderOpusImpl final : public AudioEncoder { absl::optional link_capacity_allocation); // TODO(minyue): remove "override" when we can deprecate - // |AudioEncoder::SetTargetBitrate|. + // `AudioEncoder::SetTargetBitrate`. 
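The recurring change across these Opus files is a validate-early contract: `SdpToConfig()` now returns `absl::nullopt` rather than handing back a config that fails `IsOk()`, and the `MakeAudioEncoder()`/`MakeAudioDecoder()` factories treat a bad config as a programming error. A condensed sketch of that contract (simplified types; `std::optional` and `assert` stand in for `absl::optional` and `RTC_DCHECK_NOTREACHED`):

#include <cassert>
#include <memory>
#include <optional>

struct Config {
  bool valid = false;
  bool IsOk() const { return valid; }
};
struct Encoder {
  explicit Encoder(const Config&) {}
};

// Parsing: an invalid config is never returned to the caller.
std::optional<Config> SdpToConfigSketch(bool parsed_ok) {
  Config config;
  config.valid = parsed_ok;
  if (!config.IsOk())
    return std::nullopt;  // previously the broken config leaked out here
  return config;
}

// Construction: arriving here with a bad config is a bug in the caller;
// debug builds stop, release builds still fail safely.
std::unique_ptr<Encoder> MakeEncoderSketch(const Config& config) {
  if (!config.IsOk()) {
    assert(false);  // stands in for RTC_DCHECK_NOTREACHED()
    return nullptr;
  }
  return std::make_unique<Encoder>(config);
}

This is also why the unit tests above shrink: an invalid SDP format can no longer produce a config object to probe with `IsOk()`.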
void SetTargetBitrate(int target_bps) override; void ApplyAudioNetworkAdaptor(); @@ -175,7 +177,6 @@ class AudioEncoderOpusImpl final : public AudioEncoder { int consecutive_dtx_frames_; friend struct AudioEncoderOpus; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderOpusImpl); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc b/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc index f1953eaacf..daca6aaffe 100644 --- a/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc +++ b/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc @@ -228,8 +228,8 @@ TEST_P(AudioEncoderOpusTest, TEST_P(AudioEncoderOpusTest, SetReceiverFrameLengthRange) { auto states = CreateCodec(sample_rate_hz_, 2); - // Before calling to |SetReceiverFrameLengthRange|, - // |supported_frame_lengths_ms| should contain only the frame length being + // Before calling to `SetReceiverFrameLengthRange`, + // `supported_frame_lengths_ms` should contain only the frame length being // used. using ::testing::ElementsAre; EXPECT_THAT(states->encoder->supported_frame_lengths_ms(), @@ -348,7 +348,7 @@ TEST_P(AudioEncoderOpusTest, // will fail. constexpr float kPacketLossFraction_1 = 0.02f; constexpr float kPacketLossFraction_2 = 0.198f; - // |kSecondSampleTimeMs| is chosen to ease the calculation since + // `kSecondSampleTimeMs` is chosen to ease the calculation since // 0.9999 ^ 6931 = 0.5. constexpr int64_t kSecondSampleTimeMs = 6931; @@ -380,7 +380,7 @@ TEST_P(AudioEncoderOpusTest, DoNotInvokeSetTargetBitrateIfOverheadUnknown) { states->encoder->OnReceivedUplinkBandwidth(kDefaultOpusRate * 2, absl::nullopt); - // Since |OnReceivedOverhead| has not been called, the codec bitrate should + // Since `OnReceivedOverhead` has not been called, the codec bitrate should // not change. EXPECT_EQ(kDefaultOpusRate, states->encoder->GetTargetBitrate()); } diff --git a/modules/audio_coding/codecs/opus/opus_fec_test.cc b/modules/audio_coding/codecs/opus/opus_fec_test.cc index 1923647fba..0636935b6b 100644 --- a/modules/audio_coding/codecs/opus/opus_fec_test.cc +++ b/modules/audio_coding/codecs/opus/opus_fec_test.cc @@ -218,8 +218,8 @@ TEST_P(OpusFecTest, RandomPacketLossTest) { time_now_ms += block_duration_ms_; - // |data_pointer_| is incremented and wrapped across - // |loop_length_samples_|. + // `data_pointer_` is incremented and wrapped across + // `loop_length_samples_`. data_pointer_ = (data_pointer_ + block_length_sample_ * channels_) % loop_length_samples_; } diff --git a/modules/audio_coding/codecs/opus/opus_interface.cc b/modules/audio_coding/codecs/opus/opus_interface.cc index f684452ad5..033791971f 100644 --- a/modules/audio_coding/codecs/opus/opus_interface.cc +++ b/modules/audio_coding/codecs/opus/opus_interface.cc @@ -574,8 +574,8 @@ void WebRtcOpus_DecoderInit(OpusDecInst* inst) { /* For decoder to determine if it is to output speech or comfort noise. */ static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) { - // Audio type becomes comfort noise if |encoded_byte| is 1 and keeps - // to be so if the following |encoded_byte| are 0 or 1. + // Audio type becomes comfort noise if `encoded_byte` is 1 and keeps + // to be so if the following `encoded_byte` are 0 or 1. if (encoded_bytes == 0 && inst->in_dtx_mode) { return 2; // Comfort noise. 
} else if (encoded_bytes == 1 || encoded_bytes == 2) { @@ -595,7 +595,7 @@ static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) { } } -/* |frame_size| is set to maximum Opus frame size in the normal case, and +/* `frame_size` is set to maximum Opus frame size in the normal case, and * is set to the number of samples needed for PLC in case of losses. * It is up to the caller to make sure the value is correct. */ static int DecodeNative(OpusDecInst* inst, @@ -632,9 +632,9 @@ static int DecodePlc(OpusDecInst* inst, int16_t* decoded) { FrameSizePerChannel(kWebRtcOpusPlcFrameSizeMs, inst->sample_rate_hz); if (inst->plc_use_prev_decoded_samples) { - /* The number of samples we ask for is |number_of_lost_frames| times - * |prev_decoded_samples_|. Limit the number of samples to maximum - * |MaxFrameSizePerChannel()|. */ + /* The number of samples we ask for is `number_of_lost_frames` times + * `prev_decoded_samples_`. Limit the number of samples to maximum + * `MaxFrameSizePerChannel()`. */ plc_samples = inst->prev_decoded_samples; const int max_samples_per_channel = MaxFrameSizePerChannel(inst->sample_rate_hz); @@ -729,9 +729,9 @@ int WebRtcOpus_DurationEst(OpusDecInst* inst, int WebRtcOpus_PlcDuration(OpusDecInst* inst) { if (inst->plc_use_prev_decoded_samples) { - /* The number of samples we ask for is |number_of_lost_frames| times - * |prev_decoded_samples_|. Limit the number of samples to maximum - * |MaxFrameSizePerChannel()|. */ + /* The number of samples we ask for is `number_of_lost_frames` times + * `prev_decoded_samples_`. Limit the number of samples to maximum + * `MaxFrameSizePerChannel()`. */ const int plc_samples = inst->prev_decoded_samples; const int max_samples_per_channel = MaxFrameSizePerChannel(inst->sample_rate_hz); @@ -826,8 +826,8 @@ int WebRtcOpus_PacketHasFec(const uint8_t* payload, // as binary values with uniform probability, they can be extracted directly // from the most significant bits of the first byte of compressed data. for (int n = 0; n < channels; n++) { - // The LBRR bit for channel 1 is on the (|silk_frames| + 1)-th bit, and - // that of channel 2 is on the |(|silk_frames| + 1) * 2 + 1|-th bit. + // The LBRR bit for channel 1 is on the (`silk_frames` + 1)-th bit, and + // that of channel 2 is on the ((`silk_frames` + 1) * 2 + 1)-th bit. if (frame_data[0][0] & (0x80 >> ((n + 1) * (silk_frames + 1) - 1))) return 1; } diff --git a/modules/audio_coding/codecs/opus/opus_unittest.cc b/modules/audio_coding/codecs/opus/opus_unittest.cc index 80cab50137..b40d73805f 100644 --- a/modules/audio_coding/codecs/opus/opus_unittest.cc +++ b/modules/audio_coding/codecs/opus/opus_unittest.cc @@ -115,10 +115,10 @@ class OpusTest void TestCbrEffect(bool dtx, int block_length_ms); - // Prepare |speech_data_| for encoding, read from a hard-coded file. - // After preparation, |speech_data_.GetNextBlock()| returns a pointer to a - // block of |block_length_ms| milliseconds. The data is looped every - // |loop_length_ms| milliseconds. + // Prepare `speech_data_` for encoding, read from a hard-coded file. + // After preparation, `speech_data_.GetNextBlock()` returns a pointer to a + // block of `block_length_ms` milliseconds. The data is looped every + // `loop_length_ms` milliseconds.
void PrepareSpeechData(int block_length_ms, int loop_length_ms); int EncodeDecode(WebRtcOpusEncInst* encoder, @@ -310,24 +310,24 @@ void OpusTest::TestDtxEffect(bool dtx, int block_length_ms) { // one with an arbitrary size and the other of 1-byte, then stops sending for // a certain number of frames. - // |max_dtx_frames| is the maximum number of frames Opus can stay in DTX. + // `max_dtx_frames` is the maximum number of frames Opus can stay in DTX. // TODO(kwiberg): Why does this number depend on the encoding sample rate? const int max_dtx_frames = (encoder_sample_rate_hz_ == 16000 ? 800 : 400) / block_length_ms + 1; - // We run |kRunTimeMs| milliseconds of pure silence. + // We run `kRunTimeMs` milliseconds of pure silence. const int kRunTimeMs = 4500; - // We check that, after a |kCheckTimeMs| milliseconds (given that the CNG in + // We check that, after a `kCheckTimeMs` milliseconds (given that the CNG in // Opus needs time to adapt), the absolute values of DTX decoded signal are - // bounded by |kOutputValueBound|. + // bounded by `kOutputValueBound`. const int kCheckTimeMs = 4000; #if defined(OPUS_FIXED_POINT) // Fixed-point Opus generates a random (comfort) noise, which has a less // predictable value bound than its floating-point Opus. This value depends on // input signal, and the time window for checking the output values (between - // |kCheckTimeMs| and |kRunTimeMs|). + // `kCheckTimeMs` and `kRunTimeMs`). const uint16_t kOutputValueBound = 30; #else @@ -336,7 +336,7 @@ void OpusTest::TestDtxEffect(bool dtx, int block_length_ms) { int time = 0; while (time < kRunTimeMs) { - // DTX mode is maintained for maximum |max_dtx_frames| frames. + // DTX mode is maintained for maximum `max_dtx_frames` frames. int i = 0; for (; i < max_dtx_frames; ++i) { time += block_length_ms; diff --git a/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h b/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h index a89dfd8ca7..a280ca2410 100644 --- a/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h +++ b/modules/audio_coding/codecs/opus/test/audio_ring_buffer.h @@ -29,11 +29,11 @@ class AudioRingBuffer final { AudioRingBuffer(size_t channels, size_t max_frames); ~AudioRingBuffer(); - // Copies |data| to the buffer and advances the write pointer. |channels| must + // Copies `data` to the buffer and advances the write pointer. `channels` must // be the same as at creation time. void Write(const float* const* data, size_t channels, size_t frames); - // Copies from the buffer to |data| and advances the read pointer. |channels| + // Copies from the buffer to `data` and advances the read pointer. `channels` // must be the same as at creation time. void Read(float* const* data, size_t channels, size_t frames); diff --git a/modules/audio_coding/codecs/opus/test/blocker.cc b/modules/audio_coding/codecs/opus/test/blocker.cc index 7f102b54af..33406cead9 100644 --- a/modules/audio_coding/codecs/opus/test/blocker.cc +++ b/modules/audio_coding/codecs/opus/test/blocker.cc @@ -16,7 +16,7 @@ namespace { -// Adds |a| and |b| frame by frame into |result| (basically matrix addition). +// Adds `a` and `b` frame by frame into `result` (basically matrix addition). void AddFrames(const float* const* a, size_t a_start_index, const float* const* b, @@ -33,7 +33,7 @@ void AddFrames(const float* const* a, } } -// Copies |src| into |dst| channel by channel. +// Copies `src` into `dst` channel by channel. 
void CopyFrames(const float* const* src, size_t src_start_index, size_t num_frames, @@ -46,7 +46,7 @@ void CopyFrames(const float* const* src, } } -// Moves |src| into |dst| channel by channel. +// Moves `src` into `dst` channel by channel. void MoveFrames(const float* const* src, size_t src_start_index, size_t num_frames, @@ -69,8 +69,8 @@ void ZeroOut(float* const* buffer, } } -// Pointwise multiplies each channel of |frames| with |window|. Results are -// stored in |frames|. +// Pointwise multiplies each channel of `frames` with `window`. Results are +// stored in `frames`. void ApplyWindow(const float* window, size_t num_frames, size_t num_channels, @@ -134,7 +134,7 @@ Blocker::~Blocker() = default; // On each call to ProcessChunk(): // 1. New input gets read into sections _b_ and _c_ of the input buffer. // 2. We block starting from frame_offset. -// 3. We block until we reach a block |bl| that doesn't contain any frames +// 3. We block until we reach a block `bl` that doesn't contain any frames // from sections _a_ or _b_ of the input buffer. // 4. We window the current block, fire the callback for processing, window // again, and overlap/add to the output buffer. @@ -142,7 +142,7 @@ Blocker::~Blocker() = default; // 6. For both the input and the output buffers, we copy section _c_ into // section _a_. // 7. We set the new frame_offset to be the difference between the first frame -// of |bl| and the border between sections _b_ and _c_. +// of `bl` and the border between sections _b_ and _c_. // // When block_size > chunk_size the input and output buffers look like this: // @@ -153,13 +153,13 @@ Blocker::~Blocker() = default; // On each call to ProcessChunk(): // The procedure is the same as above, except for: // 1. New input gets read into section _c_ of the input buffer. -// 3. We block until we reach a block |bl| that doesn't contain any frames +// 3. We block until we reach a block `bl` that doesn't contain any frames // from section _a_ of the input buffer. // 5. We copy section _a_ of the output buffer into output. // 6. For both the input and the output buffers, we copy sections _b_ and _c_ // into section _a_ and _b_. // 7. We set the new frame_offset to be the difference between the first frame -// of |bl| and the border between sections _a_ and _b_. +// of `bl` and the border between sections _a_ and _b_. // // * delay here refers to initial_delay_ // diff --git a/modules/audio_coding/codecs/opus/test/blocker.h b/modules/audio_coding/codecs/opus/test/blocker.h index 26177bcadd..59b7e29621 100644 --- a/modules/audio_coding/codecs/opus/test/blocker.h +++ b/modules/audio_coding/codecs/opus/test/blocker.h @@ -39,7 +39,7 @@ class BlockerCallback { // of audio, which is not a power of 2. Blocker allows us to specify the // transform and all other necessary processing via the Process() callback // function without any constraints on the transform-size -// (read: |block_size_|) or received-audio-size (read: |chunk_size_|). +// (read: `block_size_`) or received-audio-size (read: `chunk_size_`). // We handle this for the multichannel audio case, allowing for different // numbers of input and output channels (for example, beamforming takes 2 or // more input channels and returns 1 output channel). Audio signals are @@ -53,8 +53,8 @@ class BlockerCallback { // sending back a processed chunk // // To use blocker: -// 1. Impelment a BlockerCallback object |bc|. -// 2. Instantiate a Blocker object |b|, passing in |bc|. +// 1. Implement a BlockerCallback object `bc`. +// 2.
Instantiate a Blocker object `b`, passing in `bc`. // 3. As you receive audio, call b.ProcessChunk() to get processed audio. // // A small amount of delay is added to the first received chunk to deal with @@ -101,7 +101,7 @@ class Blocker { // input and output buffers are responsible for saving those frames between // calls to ProcessChunk(). // - // Both contain |initial delay| + |chunk_size| frames. The input is a fairly + // Both contain `initial delay` + `chunk_size` frames. The input is a fairly // standard FIFO, but due to the overlap-add it's harder to use an // AudioRingBuffer for the output. AudioRingBuffer input_buffer_; @@ -116,7 +116,7 @@ class Blocker { std::unique_ptr<float[]> window_; // The amount of frames between the start of contiguous blocks. For example, - // |shift_amount_| = |block_size_| / 2 for a Hann window. + // `shift_amount_` = `block_size_` / 2 for a Hann window. size_t shift_amount_; BlockerCallback* callback_; diff --git a/modules/audio_coding/codecs/opus/test/lapped_transform.h b/modules/audio_coding/codecs/opus/test/lapped_transform.h index 3620df3831..bb25c34a9e 100644 --- a/modules/audio_coding/codecs/opus/test/lapped_transform.h +++ b/modules/audio_coding/codecs/opus/test/lapped_transform.h @@ -84,11 +84,11 @@ class LappedTransform { std::complex<float>* const* out_block) = 0; }; - // Construct a transform instance. |chunk_length| is the number of samples in - // each channel. |window| defines the window, owned by the caller (a copy is - // made internally); |window| should have length equal to |block_length|. - // |block_length| defines the length of a block, in samples. - // |shift_amount| is in samples. |callback| is the caller-owned audio + // Construct a transform instance. `chunk_length` is the number of samples in + // each channel. `window` defines the window, owned by the caller (a copy is + // made internally); `window` should have length equal to `block_length`. + // `block_length` defines the length of a block, in samples. + // `shift_amount` is in samples. `callback` is the caller-owned audio // processing function called for each block of the input chunk. LappedTransform(size_t num_in_channels, size_t num_out_channels, @@ -99,10 +99,10 @@ class LappedTransform { Callback* callback); ~LappedTransform(); - // Main audio processing helper method. Internally slices |in_chunk| into + // Main audio processing helper method. Internally slices `in_chunk` into // blocks, transforms them to frequency domain, calls the callback for each // block and returns a de-blocked time domain chunk of audio through - // |out_chunk|. Both buffers are caller-owned. + // `out_chunk`. Both buffers are caller-owned. void ProcessChunk(const float* const* in_chunk, float* const* out_chunk); // Get the chunk length. @@ -132,8 +132,8 @@ class LappedTransform { // Returns the initial delay. // - // This is the delay introduced by the |blocker_| to be able to get and return - // chunks of |chunk_length|, but process blocks of |block_length|. + // This is the delay introduced by the `blocker_` to be able to get and return + // chunks of `chunk_length`, but process blocks of `block_length`.
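The "To use blocker" steps above can be made concrete with a pass-through callback. The diff does not show BlockerCallback's virtual method, so the signature below is an assumption inferred from the surrounding declarations; treat it as a sketch, not the authoritative interface:

```cpp
// Assumed ProcessBlock shape: (input, frames, in channels, out channels,
// output). Verify against blocker.h before use.
class CopyCallback : public BlockerCallback {
 public:
  void ProcessBlock(const float* const* input,
                    size_t num_frames,
                    size_t num_input_channels,
                    size_t num_output_channels,
                    float* const* output) override {
    // Copy each output channel from the matching input channel,
    // falling back to channel 0 when there are fewer inputs.
    for (size_t ch = 0; ch < num_output_channels; ++ch) {
      const size_t src = ch < num_input_channels ? ch : 0;
      for (size_t i = 0; i < num_frames; ++i)
        output[ch][i] = input[src][i];
    }
  }
};
```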
size_t initial_delay() const { return blocker_.initial_delay(); } private: diff --git a/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc b/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc index 1dd2ff289e..7761efe8b3 100644 --- a/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc +++ b/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc @@ -42,7 +42,12 @@ int AudioDecoderPcm16B::DecodeInternal(const uint8_t* encoded, int16_t* decoded, SpeechType* speech_type) { RTC_DCHECK_EQ(sample_rate_hz_, sample_rate_hz); - size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len, decoded); + // Adjust the encoded length down to ensure the same number of samples in each + // channel. + const size_t encoded_len_adjusted = + PacketDuration(encoded, encoded_len) * 2 * + Channels(); // 2 bytes per sample per channel + size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len_adjusted, decoded); *speech_type = ConvertSpeechType(1); return static_cast<int>(ret); } diff --git a/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h b/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h index f08c4a6298..6f50161d3f 100644 --- a/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h +++ b/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h @@ -18,13 +18,16 @@ #include "api/audio_codecs/audio_decoder.h" #include "rtc_base/buffer.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { class AudioDecoderPcm16B final : public AudioDecoder { public: AudioDecoderPcm16B(int sample_rate_hz, size_t num_channels); + + AudioDecoderPcm16B(const AudioDecoderPcm16B&) = delete; + AudioDecoderPcm16B& operator=(const AudioDecoderPcm16B&) = delete; + void Reset() override; std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload, uint32_t timestamp) override; @@ -42,7 +45,6 @@ class AudioDecoderPcm16B final : public AudioDecoder { private: const int sample_rate_hz_; const size_t num_channels_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcm16B); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h b/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h index 71c757250a..c363b40b3f 100644 --- a/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h +++ b/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h @@ -12,7 +12,6 @@ #define MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_ENCODER_PCM16B_H_ #include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -29,6 +28,9 @@ class AudioEncoderPcm16B final : public AudioEncoderPcm { explicit AudioEncoderPcm16B(const Config& config) : AudioEncoderPcm(config, config.sample_rate_hz) {} + AudioEncoderPcm16B(const AudioEncoderPcm16B&) = delete; + AudioEncoderPcm16B& operator=(const AudioEncoderPcm16B&) = delete; + protected: size_t EncodeCall(const int16_t* audio, size_t input_len, @@ -37,9 +39,6 @@ class AudioEncoderPcm16B final : public AudioEncoderPcm { size_t BytesPerSample() const override; AudioEncoder::CodecType GetCodecType() const override; - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcm16B); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc index c72768e937..9643c7b1a5 100644 --- a/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc +++ b/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc @@ -24,6 +24,8 @@ namespace webrtc { static constexpr const int kRedMaxPacketSize = 1 << 10; // RED packets must be
less than 1024 bytes to fit the 10 bit // block length. +static constexpr const size_t kRedMaxTimestampDelta = + 1 << 14; // RED packets can encode a timestamp delta of 14 bits. static constexpr const size_t kAudioMaxRtpPacketLen = 1200; // The typical MTU is 1200 bytes. @@ -32,7 +34,7 @@ static constexpr size_t kRedLastHeaderLength = 1; // reduced size for last RED header. static constexpr size_t kRedNumberOfRedundantEncodings = - 2; // The level of redundancy we support. + 1; // The level of redundancy we support. AudioEncoderCopyRed::Config::Config() = default; AudioEncoderCopyRed::Config::Config(Config&&) = default; @@ -43,7 +45,7 @@ size_t GetMaxRedundancyFromFieldTrial() { webrtc::field_trial::FindFullName("WebRTC-Audio-Red-For-Opus"); size_t redundancy = 0; if (sscanf(red_trial.c_str(), "Enabled-%zu", &redundancy) != 1 || - redundancy < 1 || redundancy > 9) { + redundancy > 9) { return kRedNumberOfRedundantEncodings; } return redundancy; @@ -100,7 +102,7 @@ AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeImpl( RTC_CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders."; RTC_DCHECK_EQ(primary_encoded_.size(), info.encoded_bytes); - if (info.encoded_bytes == 0 || info.encoded_bytes > kRedMaxPacketSize) { + if (info.encoded_bytes == 0 || info.encoded_bytes >= kRedMaxPacketSize) { return info; } RTC_DCHECK_GT(max_packet_length_, info.encoded_bytes); @@ -110,7 +112,9 @@ AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeImpl( auto it = redundant_encodings_.begin(); // Determine how much redundancy we can fit into our packet by - // iterating forward. + // iterating forward. This is determined both by the length and by + // the timestamp difference. The latter matters with Opus DTX, which + // produces 400 ms timestamp gaps that exceed RED's timestamp delta field size. for (; it != redundant_encodings_.end(); it++) { if (bytes_available < kRedHeaderLength + it->first.encoded_bytes) { break; @@ -118,16 +122,14 @@ AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeImpl( if (it->first.encoded_bytes == 0) { break; } + if (rtp_timestamp - it->first.encoded_timestamp >= kRedMaxTimestampDelta) { + break; + } bytes_available -= kRedHeaderLength + it->first.encoded_bytes; header_length_bytes += kRedHeaderLength; } - // Allocate room for RFC 2198 header if there is redundant data. - // Otherwise this will send the primary payload type without - // wrapping in RED. - if (header_length_bytes == kRedLastHeaderLength) { - header_length_bytes = 0; - } + // Allocate room for RFC 2198 header. encoded->SetSize(header_length_bytes); // Iterate backwards and append the data. @@ -145,36 +147,34 @@ AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeImpl( info.redundant.push_back(it->first); } - // |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively + // `info` will be implicitly cast to an EncodedInfoLeaf struct, effectively // discarding the (empty) vector of redundant information. This is // intentional. - if (header_length_bytes > 0) { + if (header_length_bytes > kRedHeaderLength) { info.redundant.push_back(info); RTC_DCHECK_EQ(info.speech, info.redundant[info.redundant.size() - 1].speech); } encoded->AppendData(primary_encoded_); - if (header_length_bytes > 0) { - RTC_DCHECK_EQ(header_offset, header_length_bytes - 1); - encoded->data()[header_offset] = info.payload_type; - } + RTC_DCHECK_EQ(header_offset, header_length_bytes - 1); + encoded->data()[header_offset] = info.payload_type; // Shift the redundant encodings.
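The fitting loop earlier in this hunk stops on whichever limit is hit first: the byte budget or the 14-bit timestamp delta. A standalone restatement of those two stopping conditions; `Block` is a stand-in for the encoder's (EncodedInfo, rtc::Buffer) pairs, and 4 bytes is the full RFC 2198 header size:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative stand-in for one queued redundant encoding.
struct Block {
  size_t encoded_bytes;
  uint32_t encoded_timestamp;
};

// Counts how many redundant blocks fit, mirroring the two break
// conditions in the loop above (byte budget and 14-bit delta).
size_t CountUsableRedundancy(const std::vector<Block>& blocks,
                             uint32_t rtp_timestamp,
                             size_t bytes_available) {
  constexpr size_t kRedHeaderLength = 4;
  constexpr uint32_t kRedMaxTimestampDelta = 1 << 14;
  size_t used = 0;
  for (const Block& b : blocks) {
    if (b.encoded_bytes == 0 ||
        bytes_available < kRedHeaderLength + b.encoded_bytes ||
        rtp_timestamp - b.encoded_timestamp >= kRedMaxTimestampDelta) {
      break;
    }
    bytes_available -= kRedHeaderLength + b.encoded_bytes;
    ++used;
  }
  return used;
}
```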
- it = redundant_encodings_.begin(); - for (auto next = std::next(it); next != redundant_encodings_.end(); - it++, next = std::next(it)) { - next->first = it->first; - next->second.SetData(it->second); + auto rit = redundant_encodings_.rbegin(); + for (auto next = std::next(rit); next != redundant_encodings_.rend(); + rit++, next = std::next(rit)) { + rit->first = next->first; + rit->second.SetData(next->second); } it = redundant_encodings_.begin(); - it->first = info; - it->second.SetData(primary_encoded_); + if (it != redundant_encodings_.end()) { + it->first = info; + it->second.SetData(primary_encoded_); + } // Update main EncodedInfo. - if (header_length_bytes > 0) { - info.payload_type = red_payload_type_; - } + info.payload_type = red_payload_type_; info.encoded_bytes = encoded->size(); return info; } diff --git a/modules/audio_coding/codecs/red/audio_encoder_copy_red.h b/modules/audio_coding/codecs/red/audio_encoder_copy_red.h index d5b1bf6868..d163193251 100644 --- a/modules/audio_coding/codecs/red/audio_encoder_copy_red.h +++ b/modules/audio_coding/codecs/red/audio_encoder_copy_red.h @@ -23,7 +23,6 @@ #include "api/audio_codecs/audio_encoder.h" #include "api/units/time_delta.h" #include "rtc_base/buffer.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -47,6 +46,9 @@ class AudioEncoderCopyRed final : public AudioEncoder { ~AudioEncoderCopyRed() override; + AudioEncoderCopyRed(const AudioEncoderCopyRed&) = delete; + AudioEncoderCopyRed& operator=(const AudioEncoderCopyRed&) = delete; + int SampleRateHz() const override; size_t NumChannels() const override; int RtpTimestampRateHz() const override; @@ -92,8 +94,6 @@ class AudioEncoderCopyRed final : public AudioEncoder { size_t max_packet_length_; int red_payload_type_; std::list<std::pair<EncodedInfo, rtc::Buffer>> redundant_encodings_; - - RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderCopyRed); }; } // namespace webrtc diff --git a/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc index ddd82441db..0eeac011e0 100644 --- a/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc +++ b/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc @@ -15,6 +15,7 @@ #include "rtc_base/checks.h" #include "rtc_base/numerics/safe_conversions.h" +#include "test/field_trial.h" #include "test/gtest.h" #include "test/mock_audio_encoder.h" #include "test/testsupport/rtc_expect_death.h" @@ -33,6 +34,8 @@ namespace webrtc { namespace { static const size_t kMaxNumSamples = 48 * 10 * 2; // 10 ms @ 48 kHz stereo. +static const size_t kRedLastHeaderLength = + 1; // 1 byte RED header for the last element. } class AudioEncoderCopyRedTest : public ::testing::Test { @@ -153,7 +156,7 @@ TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) { // First call is a special case, since it does not include a secondary // payload. EXPECT_EQ(0u, encoded_info_.redundant.size()); - EXPECT_EQ(kEncodedSize, encoded_info_.encoded_bytes); + EXPECT_EQ(kEncodedSize + kRedLastHeaderLength, encoded_info_.encoded_bytes); // Next call to the speech encoder will not produce any output. Encode(); @@ -166,8 +169,8 @@ TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) { } // Checks that the correct payload sizes are populated into the redundancy -// information. -TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) { +// information for a redundancy level of 1. +TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes1) { // Let the mock encoder return payload sizes 1, 2, 3, ..., 10 for the sequence // of calls.
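For the size expectations in CheckPayloadSizes1 (the loop that follows), the arithmetic is: with one level of redundancy each packet carries one full 4-byte RED header, one 1-byte final header, the previous (i - 1)-byte payload, and the current i-byte payload, hence 5 + i + (i - 1). A compile-time restatement, assuming the 4-byte RFC 2198 full-header size:

```cpp
#include <cstddef>

constexpr size_t kFullRedHeader = 4;  // RFC 2198 header for a redundant block
constexpr size_t kLastRedHeader = 1;  // reduced header for the final block

constexpr size_t ExpectedRedPacketSize(size_t primary, size_t redundant) {
  return kFullRedHeader + kLastRedHeader + primary + redundant;
}
// Matches the test's 5 + i + (i - 1) for i == 2.
static_assert(ExpectedRedPacketSize(2, 1) == 5 + 2 + 1, "one redundancy level");
```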
static const int kNumPackets = 10; @@ -181,9 +184,70 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) { // payload. Encode(); EXPECT_EQ(0u, encoded_info_.redundant.size()); - EXPECT_EQ(1u, encoded_info_.encoded_bytes); + EXPECT_EQ(kRedLastHeaderLength + 1u, encoded_info_.encoded_bytes); - // Second call is also special since it does not include a ternary + for (size_t i = 2; i <= kNumPackets; ++i) { + Encode(); + ASSERT_EQ(2u, encoded_info_.redundant.size()); + EXPECT_EQ(i, encoded_info_.redundant[1].encoded_bytes); + EXPECT_EQ(i - 1, encoded_info_.redundant[0].encoded_bytes); + EXPECT_EQ(5 + i + (i - 1), encoded_info_.encoded_bytes); + } +} + +// Checks that the correct payload sizes are populated into the redundancy +// information for a redundancy level of 0. +TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes0) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-Audio-Red-For-Opus/Enabled-0/"); + // Recreate the RED encoder to take the new field trial setting into account. + AudioEncoderCopyRed::Config config; + config.payload_type = red_payload_type_; + config.speech_encoder = std::move(red_->ReclaimContainedEncoders()[0]); + red_.reset(new AudioEncoderCopyRed(std::move(config))); + + // Let the mock encoder return payload sizes 1, 2, 3, ..., 10 for the sequence + // of calls. + static const int kNumPackets = 10; + InSequence s; + for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) { + EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)) + .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(encode_size))); + } + + for (size_t i = 1; i <= kNumPackets; ++i) { + Encode(); + ASSERT_EQ(0u, encoded_info_.redundant.size()); + EXPECT_EQ(1 + i, encoded_info_.encoded_bytes); + } +} +// Checks that the correct payload sizes are populated into the redundancy +// information for a redundancy level of 2. +TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes2) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-Audio-Red-For-Opus/Enabled-2/"); + // Recreate the RED encoder to take the new field trial setting into account. + AudioEncoderCopyRed::Config config; + config.payload_type = red_payload_type_; + config.speech_encoder = std::move(red_->ReclaimContainedEncoders()[0]); + red_.reset(new AudioEncoderCopyRed(std::move(config))); + + // Let the mock encoder return payload sizes 1, 2, 3, ..., 10 for the sequence + // of calls. + static const int kNumPackets = 10; + InSequence s; + for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) { + EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)) + .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(encode_size))); + } + + // First call is a special case, since it does not include a secondary + // payload. + Encode(); + EXPECT_EQ(0u, encoded_info_.redundant.size()); + EXPECT_EQ(kRedLastHeaderLength + 1u, encoded_info_.encoded_bytes); + + // Second call is also special since it does not include a tertiary // payload. Encode(); EXPECT_EQ(2u, encoded_info_.redundant.size()); @@ -199,6 +263,56 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) { } } +// Checks that the correct payload sizes are populated into the redundancy +// information for a redundancy level of 3. +TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes3) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-Audio-Red-For-Opus/Enabled-3/"); + // Recreate the RED encoder to take the new field trial setting into account. 
+ AudioEncoderCopyRed::Config config; + config.payload_type = red_payload_type_; + config.speech_encoder = std::move(red_->ReclaimContainedEncoders()[0]); + red_.reset(new AudioEncoderCopyRed(std::move(config))); + + // Let the mock encoder return payload sizes 1, 2, 3, ..., 10 for the sequence + // of calls. + static const int kNumPackets = 10; + InSequence s; + for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) { + EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)) + .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(encode_size))); + } + + // First call is a special case, since it does not include a secondary + // payload. + Encode(); + EXPECT_EQ(0u, encoded_info_.redundant.size()); + EXPECT_EQ(kRedLastHeaderLength + 1u, encoded_info_.encoded_bytes); + + // Second call is also special since it does not include a tertiary + // payload. + Encode(); + EXPECT_EQ(2u, encoded_info_.redundant.size()); + EXPECT_EQ(8u, encoded_info_.encoded_bytes); + + // Third call is also special since it does not include a quaternary + // payload. + Encode(); + EXPECT_EQ(3u, encoded_info_.redundant.size()); + EXPECT_EQ(15u, encoded_info_.encoded_bytes); + + for (size_t i = 4; i <= kNumPackets; ++i) { + Encode(); + ASSERT_EQ(4u, encoded_info_.redundant.size()); + EXPECT_EQ(i, encoded_info_.redundant[3].encoded_bytes); + EXPECT_EQ(i - 1, encoded_info_.redundant[2].encoded_bytes); + EXPECT_EQ(i - 2, encoded_info_.redundant[1].encoded_bytes); + EXPECT_EQ(i - 3, encoded_info_.redundant[0].encoded_bytes); + EXPECT_EQ(13 + i + (i - 1) + (i - 2) + (i - 3), + encoded_info_.encoded_bytes); + } +} + // Checks that the correct timestamps are returned. TEST_F(AudioEncoderCopyRedTest, CheckTimestamps) { uint32_t primary_timestamp = timestamp_; @@ -243,9 +357,10 @@ TEST_F(AudioEncoderCopyRedTest, CheckPayloads) { // First call is a special case, since it does not include a secondary // payload. Encode(); - EXPECT_EQ(kPayloadLenBytes, encoded_info_.encoded_bytes); + EXPECT_EQ(kRedLastHeaderLength + kPayloadLenBytes, + encoded_info_.encoded_bytes); for (size_t i = 0; i < kPayloadLenBytes; ++i) { - EXPECT_EQ(i, encoded_.data()[i]); + EXPECT_EQ(i, encoded_.data()[kRedLastHeaderLength + i]); } for (int j = 0; j < 1; ++j) { @@ -329,7 +444,96 @@ TEST_F(AudioEncoderCopyRedTest, CheckRFC2198Header) { // redundancy. EXPECT_EQ(encoded_.size(), - 9u + 3 * 10u); // header size + two encoded payloads. + 5u + 2 * 10u); // header size + two encoded payloads. + EXPECT_EQ(encoded_[0], primary_payload_type | 0x80); + + timestamp_delta = encoded_info_.encoded_timestamp - + encoded_info_.redundant[0].encoded_timestamp; + // Timestamp delta is encoded as a 14 bit value. + EXPECT_EQ(encoded_[1], timestamp_delta >> 6); + EXPECT_EQ(static_cast<uint32_t>(encoded_[2] >> 2), timestamp_delta & 0x3f); + // Redundant length is encoded as 10 bit value. + EXPECT_EQ(encoded_[2] & 0x3u, encoded_info_.redundant[1].encoded_bytes >> 8); + EXPECT_EQ(encoded_[3], encoded_info_.redundant[1].encoded_bytes & 0xff); + + EXPECT_EQ(encoded_[4], primary_payload_type | 0x80); + timestamp_delta = encoded_info_.encoded_timestamp - + encoded_info_.redundant[1].encoded_timestamp; +} + +// Variant with a redundancy of 0. +TEST_F(AudioEncoderCopyRedTest, CheckRFC2198Header0) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-Audio-Red-For-Opus/Enabled-0/"); + // Recreate the RED encoder to take the new field trial setting into account.
+ AudioEncoderCopyRed::Config config; + config.payload_type = red_payload_type_; + config.speech_encoder = std::move(red_->ReclaimContainedEncoders()[0]); + red_.reset(new AudioEncoderCopyRed(std::move(config))); + + const int primary_payload_type = red_payload_type_ + 1; + AudioEncoder::EncodedInfo info; + info.encoded_bytes = 10; + info.encoded_timestamp = timestamp_; + info.payload_type = primary_payload_type; + + EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)) + .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info))); + Encode(); + info.encoded_timestamp = timestamp_; // update timestamp. + EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)) + .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info))); + Encode(); // Second call will not produce a redundant encoding. + + EXPECT_EQ(encoded_.size(), + 1u + 1 * 10u); // header size + one encoded payload. + EXPECT_EQ(encoded_[0], primary_payload_type); +} +// Variant with a redundancy of 2. +TEST_F(AudioEncoderCopyRedTest, CheckRFC2198Header2) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-Audio-Red-For-Opus/Enabled-2/"); + // Recreate the RED encoder to take the new field trial setting into account. + AudioEncoderCopyRed::Config config; + config.payload_type = red_payload_type_; + config.speech_encoder = std::move(red_->ReclaimContainedEncoders()[0]); + red_.reset(new AudioEncoderCopyRed(std::move(config))); + + const int primary_payload_type = red_payload_type_ + 1; + AudioEncoder::EncodedInfo info; + info.encoded_bytes = 10; + info.encoded_timestamp = timestamp_; + info.payload_type = primary_payload_type; + + EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)) + .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info))); + Encode(); + info.encoded_timestamp = timestamp_; // update timestamp. + EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)) + .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info))); + Encode(); // Second call will produce a redundant encoding. + + EXPECT_EQ(encoded_.size(), + 5u + 2 * 10u); // header size + two encoded payloads. + EXPECT_EQ(encoded_[0], primary_payload_type | 0x80); + + uint32_t timestamp_delta = encoded_info_.encoded_timestamp - + encoded_info_.redundant[0].encoded_timestamp; + // Timestamp delta is encoded as a 14 bit value. + EXPECT_EQ(encoded_[1], timestamp_delta >> 6); + EXPECT_EQ(static_cast<uint32_t>(encoded_[2] >> 2), timestamp_delta & 0x3f); + // Redundant length is encoded as 10 bit value. + EXPECT_EQ(encoded_[2] & 0x3u, encoded_info_.redundant[1].encoded_bytes >> 8); + EXPECT_EQ(encoded_[3], encoded_info_.redundant[1].encoded_bytes & 0xff); + EXPECT_EQ(encoded_[4], primary_payload_type); + + EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)) + .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info))); + Encode(); // Third call will produce a redundant encoding with double + // redundancy. + + EXPECT_EQ(encoded_.size(), + 9u + 3 * 10u); // header size + three encoded payloads.
EXPECT_EQ(encoded_[0], primary_payload_type | 0x80); timestamp_delta = encoded_info_.encoded_timestamp - @@ -379,6 +583,29 @@ TEST_F(AudioEncoderCopyRedTest, RespectsPayloadMTU) { EXPECT_EQ(encoded_.size(), 5u + 500u + 400u); } +TEST_F(AudioEncoderCopyRedTest, LargeTimestampGap) { + const int primary_payload_type = red_payload_type_ + 1; + AudioEncoder::EncodedInfo info; + info.encoded_bytes = 100; + info.encoded_timestamp = timestamp_; + info.payload_type = primary_payload_type; + + EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)) + .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info))); + Encode(); + // Update timestamp to simulate a 400ms gap like the one + // opus DTX causes. + timestamp_ += 19200; + info.encoded_timestamp = timestamp_; // update timestamp. + info.encoded_bytes = 200; + EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)) + .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info))); + Encode(); + + // The old packet will be dropped. + EXPECT_EQ(encoded_.size(), 1u + 200u); +} + #if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) // This test fixture tests various error conditions that make the diff --git a/modules/audio_coding/codecs/tools/audio_codec_speed_test.h b/modules/audio_coding/codecs/tools/audio_codec_speed_test.h index 59c2f16055..c5f1d7c259 100644 --- a/modules/audio_coding/codecs/tools/audio_codec_speed_test.h +++ b/modules/audio_coding/codecs/tools/audio_codec_speed_test.h @@ -31,9 +31,9 @@ class AudioCodecSpeedTest : public ::testing::TestWithParam { virtual void TearDown(); // EncodeABlock(...) does the following: - // 1. encodes a block of audio, saved in |in_data|, - // 2. save the bit stream to |bit_stream| of |max_bytes| bytes in size, - // 3. assign |encoded_bytes| with the length of the bit stream (in bytes), + // 1. encodes a block of audio, saved in `in_data`, + // 2. saves the bit stream to `bit_stream` of `max_bytes` bytes in size, + // 3. assigns `encoded_bytes` with the length of the bit stream (in bytes), // 4. returns the cost of time (in milliseconds) spent on actual encoding. virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream, @@ -41,15 +41,15 @@ class AudioCodecSpeedTest : public ::testing::TestWithParam { size_t* encoded_bytes) = 0; // DecodeABlock(...) does the following: - // 1. decodes the bit stream in |bit_stream| with a length of |encoded_bytes| // (in bytes), - // 2. save the decoded audio in |out_data|, + // 1. decodes the bit stream in `bit_stream` with a length of `encoded_bytes` // (in bytes), + // 2. saves the decoded audio in `out_data`, // 3. returns the cost of time (in milliseconds) spent on actual decoding. virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes, int16_t* out_data) = 0; - // Encoding and decode an audio of |audio_duration| (in seconds) and + // Encodes and decodes audio of `audio_duration` (in seconds) and // records the runtime for encoding and decoding separately. void EncodeDecode(size_t audio_duration); diff --git a/modules/audio_coding/g3doc/index.md b/modules/audio_coding/g3doc/index.md index bf50c155fc..d0f6b9f81b 100644 --- a/modules/audio_coding/g3doc/index.md +++ b/modules/audio_coding/g3doc/index.md @@ -23,10 +23,10 @@ WebRTC audio coding module can handle both audio sending and receiving. Folder Audio decoders are provided by an audio decoder factory. Decoded audio samples should be queried by calling [`PlayoutData10Ms()`][PlayoutData10Ms].
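The header expectations in the RED tests above decode the RFC 2198 block layout: an F bit, a 7-bit payload type, a 14-bit timestamp offset and a 10-bit block length. A sketch that writes one such 4-byte header, mirroring the bit positions the EXPECT_EQs check; the same 14-bit field also explains why the 19200-tick jump in LargeTimestampGap (400 ms at the 48 kHz RTP clock) drops the redundant copy:

```cpp
#include <cstdint>

// Writes one full RFC 2198 redundant-block header.
void WriteRedHeader(uint8_t* out, uint8_t payload_type,
                    uint16_t timestamp_delta,  // must be < 2^14
                    uint16_t block_length) {   // must be < 2^10
  out[0] = 0x80 | payload_type;               // F=1: another block follows
  out[1] = timestamp_delta >> 6;              // top 8 of the 14 delta bits
  out[2] = ((timestamp_delta & 0x3f) << 2) |  // low 6 delta bits
           (block_length >> 8);               // top 2 of the 10 length bits
  out[3] = block_length & 0xff;               // low 8 length bits
}

// Why LargeTimestampGap drops the old packet: a 400 ms Opus DTX gap at
// 48 kHz is 19200 ticks, more than a 14-bit delta can represent.
static_assert(48000 * 400 / 1000 == 19200, "400 ms at the 48 kHz RTP clock");
static_assert(19200 >= (1 << 14), "exceeds the 14-bit timestamp delta");
```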
-[acm2]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_coding/acm2/;drc=854d59f7501aac9e9bccfa7b4d1f7f4db7842719 -[Add10MsData]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_coding/include/audio_coding_module.h;l=136;drc=d82a02c837d33cdfd75121e40dcccd32515e42d6 -[codecs]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_coding/codecs/;drc=883fea1548d58e0080f98d66fab2e0c744dfb556 -[ANA]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_coding/audio_network_adaptor/;drc=1f99551775cd876c116d1d90cba94c8a4670d184 -[IncomingPacket]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_coding/include/audio_coding_module.h;l=192;drc=d82a02c837d33cdfd75121e40dcccd32515e42d6 -[NetEq]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_coding/neteq/;drc=213dc2cfc5f1b360b1c6fc51d393491f5de49d3d -[PlayoutData10Ms]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_coding/include/audio_coding_module.h;l=216;drc=d82a02c837d33cdfd75121e40dcccd32515e42d6 +[acm2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/acm2/;drc=854d59f7501aac9e9bccfa7b4d1f7f4db7842719 +[Add10MsData]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/include/audio_coding_module.h;l=136;drc=d82a02c837d33cdfd75121e40dcccd32515e42d6 +[codecs]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/codecs/;drc=883fea1548d58e0080f98d66fab2e0c744dfb556 +[ANA]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/audio_network_adaptor/;drc=1f99551775cd876c116d1d90cba94c8a4670d184 +[IncomingPacket]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/include/audio_coding_module.h;l=192;drc=d82a02c837d33cdfd75121e40dcccd32515e42d6 +[NetEq]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/neteq/;drc=213dc2cfc5f1b360b1c6fc51d393491f5de49d3d +[PlayoutData10Ms]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/include/audio_coding_module.h;l=216;drc=d82a02c837d33cdfd75121e40dcccd32515e42d6 diff --git a/modules/audio_coding/include/audio_coding_module.h b/modules/audio_coding/include/audio_coding_module.h index 102e2de83b..8b518fb979 100644 --- a/modules/audio_coding/include/audio_coding_module.h +++ b/modules/audio_coding/include/audio_coding_module.h @@ -54,7 +54,7 @@ class AudioPacketizationCallback { uint32_t timestamp, const uint8_t* payload_data, size_t payload_len_bytes) { - RTC_NOTREACHED() << "This method must be overridden, or not used."; + RTC_DCHECK_NOTREACHED() << "This method must be overridden, or not used."; return -1; } }; @@ -83,9 +83,9 @@ class AudioCodingModule { // Sender // - // |modifier| is called exactly once with one argument: a pointer to the + // `modifier` is called exactly once with one argument: a pointer to the // unique_ptr that holds the current encoder (which is null if there is no - // current encoder). For the duration of the call, |modifier| has exclusive + // current encoder). 
For the duration of the call, `modifier` has exclusive // access to the unique_ptr; it may call the encoder, steal the encoder and // replace it with another encoder or with nullptr, etc. virtual void ModifyEncoder( @@ -190,7 +190,7 @@ class AudioCodingModule { // 0 if payload is successfully pushed in. // virtual int32_t IncomingPacket(const uint8_t* incoming_payload, - const size_t payload_len_bytes, + size_t payload_len_bytes, const RTPHeader& rtp_header) = 0; /////////////////////////////////////////////////////////////////////////// @@ -237,6 +237,8 @@ class AudioCodingModule { NetworkStatistics* network_statistics) = 0; virtual ANAStats GetANAStats() const = 0; + + virtual int GetTargetBitrate() const = 0; }; } // namespace webrtc diff --git a/modules/audio_coding/include/audio_coding_module_typedefs.h b/modules/audio_coding/include/audio_coding_module_typedefs.h index a7210dadcb..e5598e3c45 100644 --- a/modules/audio_coding/include/audio_coding_module_typedefs.h +++ b/modules/audio_coding/include/audio_coding_module_typedefs.h @@ -81,19 +81,22 @@ struct NetworkStatistics { // adding extra delay due to "peaky jitter" bool jitterPeaksFound; // Stats below correspond to similarly-named fields in the WebRTC stats spec. - // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats + // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats uint64_t totalSamplesReceived; uint64_t concealedSamples; uint64_t silentConcealedSamples; uint64_t concealmentEvents; uint64_t jitterBufferDelayMs; uint64_t jitterBufferEmittedCount; - // Non standard stats propagated to spec complaint GetStats API. - uint64_t jitterBufferTargetDelayMs; uint64_t insertedSamplesForDeceleration; uint64_t removedSamplesForAcceleration; uint64_t fecPacketsReceived; uint64_t fecPacketsDiscarded; + // Stats below correspond to similarly-named fields in the WebRTC stats spec. + // https://w3c.github.io/webrtc-stats/#dom-rtcreceivedrtpstreamstats + uint64_t packetsDiscarded; + // Non standard stats propagated to spec compliant GetStats API. + uint64_t jitterBufferTargetDelayMs; // Stats below DO NOT correspond directly to anything in the WebRTC stats // fraction (of original stream) of synthesized audio inserted through // expansion (in Q14) diff --git a/modules/audio_coding/neteq/accelerate.cc b/modules/audio_coding/neteq/accelerate.cc index e97191d8d2..f4ef6cdccb 100644 --- a/modules/audio_coding/neteq/accelerate.cc +++ b/modules/audio_coding/neteq/accelerate.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/accelerate.h" -#include #include "api/array_view.h" @@ -58,12 +57,12 @@ Accelerate::ReturnCodes Accelerate::CheckCriteriaAndStretch( if ((best_correlation > correlation_threshold) || !active_speech) { // Do accelerate operation by overlap add. - // Pre-calculate common multiplication with |fs_mult_|. + // Pre-calculate common multiplication with `fs_mult_`. // 120 corresponds to 15 ms. size_t fs_mult_120 = fs_mult_ * 120; if (fast_mode) { - // Fit as many multiples of |peak_index| as possible in fs_mult_120. + // Fit as many multiples of `peak_index` as possible in fs_mult_120. // TODO(henrik.lundin) Consider finding multiple correlation peaks and // pick the one with the longest correlation lag in this case. peak_index = (fs_mult_120 / peak_index) * peak_index; @@ -73,11 +72,11 @@ Accelerate::ReturnCodes Accelerate::CheckCriteriaAndStretch( // Copy first part; 0 to 15 ms.
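A worked example of the fast-mode rounding in the hunk above, assuming `fs_mult_` is the sample rate divided by 8000 (as the "120 corresponds to 15 ms" comment implies): at 48 kHz, fs_mult_120 = 6 * 120 = 720 samples, and a 200-sample pitch period is rounded down to the three whole periods that fit.

```cpp
#include <cstddef>

// Keep as many whole pitch periods (multiples of `peak_index`) as fit
// into the 15 ms window; integer division floors the count.
size_t FitPeakMultiples(size_t fs_mult_120, size_t peak_index) {
  return (fs_mult_120 / peak_index) * peak_index;
}
// FitPeakMultiples(720, 200) == 600: three whole pitch periods in 15 ms.
```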
output->PushBackInterleaved( rtc::ArrayView<const int16_t>(input, fs_mult_120 * num_channels_)); - // Copy the |peak_index| starting at 15 ms to |temp_vector|. + // Copy the `peak_index` starting at 15 ms to `temp_vector`. AudioMultiVector temp_vector(num_channels_); temp_vector.PushBackInterleaved(rtc::ArrayView<const int16_t>( &input[fs_mult_120 * num_channels_], peak_index * num_channels_)); - // Cross-fade |temp_vector| onto the end of |output|. + // Cross-fade `temp_vector` onto the end of `output`. output->CrossFade(temp_vector, peak_index); // Copy the last unmodified part, 15 ms + pitch period until the end. output->PushBackInterleaved(rtc::ArrayView<const int16_t>( diff --git a/modules/audio_coding/neteq/accelerate.h b/modules/audio_coding/neteq/accelerate.h index 124b63384c..01fe874d54 100644 --- a/modules/audio_coding/neteq/accelerate.h +++ b/modules/audio_coding/neteq/accelerate.h @@ -15,7 +15,6 @@ #include #include "modules/audio_coding/neteq/time_stretch.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -33,11 +32,14 @@ class Accelerate : public TimeStretch { const BackgroundNoise& background_noise) : TimeStretch(sample_rate_hz, num_channels, background_noise) {} + Accelerate(const Accelerate&) = delete; + Accelerate& operator=(const Accelerate&) = delete; + // This method performs the actual Accelerate operation. The samples are - // read from |input|, of length |input_length| elements, and are written to - // |output|. The number of samples removed through time-stretching is - // is provided in the output |length_change_samples|. The method returns - // the outcome of the operation as an enumerator value. If |fast_accelerate| + // read from `input`, of length `input_length` elements, and are written to + // `output`. The number of samples removed through time-stretching is + // provided in the output `length_change_samples`. The method returns + // the outcome of the operation as an enumerator value. If `fast_accelerate` // is true, the algorithm will relax the requirements on finding strong // correlations, and may remove multiple pitch periods if possible. ReturnCodes Process(const int16_t* input, @@ -47,7 +49,7 @@ class Accelerate : public TimeStretch { size_t* length_change_samples); protected: - // Sets the parameters |best_correlation| and |peak_index| to suitable + // Sets the parameters `best_correlation` and `peak_index` to suitable // values when the signal contains no active speech. void SetParametersForPassiveSpeech(size_t len, int16_t* best_correlation, @@ -62,9 +64,6 @@ class Accelerate : public TimeStretch { bool active_speech, bool fast_mode, AudioMultiVector* output) const override; - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(Accelerate); }; struct AccelerateFactory { diff --git a/modules/audio_coding/neteq/audio_decoder_unittest.cc b/modules/audio_coding/neteq/audio_decoder_unittest.cc index 2277872ee4..bb5c6d167b 100644 --- a/modules/audio_coding/neteq/audio_decoder_unittest.cc +++ b/modules/audio_coding/neteq/audio_decoder_unittest.cc @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree.
*/ -#include #include #include @@ -31,6 +30,7 @@ #include "modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h" #include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h" #include "modules/audio_coding/neteq/tools/resample_input_audio_file.h" +#include "rtc_base/system/arch.h" #include "test/gtest.h" #include "test/testsupport/file_utils.h" @@ -41,7 +41,7 @@ namespace { constexpr int kOverheadBytesPerPacket = 50; // The absolute difference between the input and output (the first channel) is -// compared vs |tolerance|. The parameter |delay| is used to correct for codec +// compared vs `tolerance`. The parameter `delay` is used to correct for codec // delays. void CompareInputOutput(const std::vector<int16_t>& input, const std::vector<int16_t>& output, size_t num_samples, size_t channels, @@ -57,8 +57,8 @@ void CompareInputOutput(const std::vector<int16_t>& input, } } -// The absolute difference between the first two channels in |output| is -// compared vs |tolerance|. +// The absolute difference between the first two channels in `output` is +// compared vs `tolerance`. void CompareTwoChannels(const std::vector<int16_t>& output, size_t samples_per_channel, size_t channels, @@ -71,7 +71,7 @@ void CompareTwoChannels(const std::vector<int16_t>& output, } // Calculates mean-squared error between input and output (the first channel). -// The parameter |delay| is used to correct for codec delays. +// The parameter `delay` is used to correct for codec delays. double MseInputOutput(const std::vector<int16_t>& input, const std::vector<int16_t>& output, size_t num_samples, @@ -153,10 +153,10 @@ class AudioDecoderTest : public ::testing::Test { } // Encodes and decodes audio. The absolute difference between the input and - // output is compared vs |tolerance|, and the mean-squared error is compared - // with |mse|. The encoded stream should contain |expected_bytes|. For stereo + // output is compared vs `tolerance`, and the mean-squared error is compared - // with `mse`. The encoded stream should contain `expected_bytes`. For stereo // audio, the absolute difference between the two channels is compared vs - // |channel_diff_tolerance|. + // `channel_diff_tolerance`. void EncodeDecodeTest(size_t expected_bytes, int tolerance, double mse, @@ -171,7 +171,7 @@ class AudioDecoderTest : public ::testing::Test { std::vector<int16_t> input; std::vector<int16_t> decoded; while (processed_samples + frame_size_ <= data_length_) { - // Extend input vector with |frame_size_|. + // Extend input vector with `frame_size_`. input.resize(input.size() + frame_size_, 0); // Read from input file. ASSERT_GE(input.size() - processed_samples, frame_size_); @@ -581,16 +581,22 @@ TEST_F(AudioDecoderIsacSwbTest, SetTargetBitrate) { 56001 + overhead_rate)); } +// Run bit exactness test only for release builds. +#if defined(NDEBUG) TEST_F(AudioDecoderIsacFixTest, EncodeDecode) { int tolerance = 11034; double mse = 3.46e6; int delay = 54; // Delay from input to output.
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM) static const int kEncodedBytes = 685; -#elif defined(WEBRTC_ARCH_ARM64) - static const int kEncodedBytes = 673; #elif defined(WEBRTC_MAC) && defined(WEBRTC_ARCH_ARM64) // M1 Mac static const int kEncodedBytes = 673; +#elif defined(WEBRTC_ARCH_ARM64) + static const int kEncodedBytes = 673; +#elif defined(WEBRTC_WIN) && defined(_MSC_VER) && !defined(__clang__) + static const int kEncodedBytes = 671; +#elif defined(WEBRTC_IOS) && defined(WEBRTC_ARCH_X86_64) + static const int kEncodedBytes = 671; #else static const int kEncodedBytes = 671; #endif @@ -598,6 +604,7 @@ TEST_F(AudioDecoderIsacFixTest, EncodeDecode) { ReInitTest(); EXPECT_FALSE(decoder_->HasDecodePlc()); } +#endif TEST_F(AudioDecoderIsacFixTest, SetTargetBitrate) { const int overhead_rate = diff --git a/modules/audio_coding/neteq/audio_multi_vector.cc b/modules/audio_coding/neteq/audio_multi_vector.cc index 290d7eae22..220d5a17d7 100644 --- a/modules/audio_coding/neteq/audio_multi_vector.cc +++ b/modules/audio_coding/neteq/audio_multi_vector.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/audio_multi_vector.h" -#include #include @@ -78,8 +77,8 @@ void AudioMultiVector::PushBackInterleaved( size_t length_per_channel = append_this.size() / num_channels_; int16_t* temp_array = new int16_t[length_per_channel]; // Temporary storage. for (size_t channel = 0; channel < num_channels_; ++channel) { - // Copy elements to |temp_array|. - // Set |source_ptr| to first element of this channel. + // Copy elements to `temp_array`. + // Set `source_ptr` to first element of this channel. const int16_t* source_ptr = &append_this[channel]; for (size_t i = 0; i < length_per_channel; ++i) { temp_array[i] = *source_ptr; @@ -133,7 +132,7 @@ size_t AudioMultiVector::ReadInterleavedFromIndex(size_t start_index, size_t length, int16_t* destination) const { RTC_DCHECK(destination); - size_t index = 0; // Number of elements written to |destination| so far. + size_t index = 0; // Number of elements written to `destination` so far. RTC_DCHECK_LE(start_index, Size()); start_index = std::min(start_index, Size()); if (length + start_index > Size()) { @@ -163,7 +162,7 @@ void AudioMultiVector::OverwriteAt(const AudioMultiVector& insert_this, size_t length, size_t position) { RTC_DCHECK_EQ(num_channels_, insert_this.num_channels_); - // Cap |length| at the length of |insert_this|. + // Cap `length` at the length of `insert_this`. RTC_DCHECK_LE(length, insert_this.Size()); length = std::min(length, insert_this.Size()); if (num_channels_ == insert_this.num_channels_) { diff --git a/modules/audio_coding/neteq/audio_multi_vector.h b/modules/audio_coding/neteq/audio_multi_vector.h index 0bb0b28dea..715ec6dfc7 100644 --- a/modules/audio_coding/neteq/audio_multi_vector.h +++ b/modules/audio_coding/neteq/audio_multi_vector.h @@ -18,66 +18,68 @@ #include "api/array_view.h" #include "modules/audio_coding/neteq/audio_vector.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { class AudioMultiVector { public: - // Creates an empty AudioMultiVector with |N| audio channels. |N| must be + // Creates an empty AudioMultiVector with `N` audio channels. `N` must be // larger than 0. explicit AudioMultiVector(size_t N); - // Creates an AudioMultiVector with |N| audio channels, each channel having - // an initial size. |N| must be larger than 0. + // Creates an AudioMultiVector with `N` audio channels, each channel having + // an initial size. `N` must be larger than 0. 
AudioMultiVector(size_t N, size_t initial_size); virtual ~AudioMultiVector(); + AudioMultiVector(const AudioMultiVector&) = delete; + AudioMultiVector& operator=(const AudioMultiVector&) = delete; + // Deletes all values and makes the vector empty. virtual void Clear(); - // Clears the vector and inserts |length| zeros into each channel. + // Clears the vector and inserts `length` zeros into each channel. virtual void Zeros(size_t length); - // Copies all values from this vector to |copy_to|. Any contents in |copy_to| - // are deleted. After the operation is done, |copy_to| will be an exact + // Copies all values from this vector to `copy_to`. Any contents in `copy_to` + // are deleted. After the operation is done, `copy_to` will be an exact // replica of this object. The source and the destination must have the same // number of channels. virtual void CopyTo(AudioMultiVector* copy_to) const; - // Appends the contents of |append_this| to the end of this object. The array + // Appends the contents of `append_this` to the end of this object. The array // is assumed to be channel-interleaved. The length must be an even multiple // of this object's number of channels. The length of this object is increased // with the length of the array divided by the number of channels. void PushBackInterleaved(rtc::ArrayView<const int16_t> append_this); - // Appends the contents of AudioMultiVector |append_this| to this object. The - // length of this object is increased with the length of |append_this|. + // Appends the contents of AudioMultiVector `append_this` to this object. The + // length of this object is increased with the length of `append_this`. virtual void PushBack(const AudioMultiVector& append_this); - // Appends the contents of AudioMultiVector |append_this| to this object, - // taken from |index| up until the end of |append_this|. The length of this + // Appends the contents of AudioMultiVector `append_this` to this object, + // taken from `index` up until the end of `append_this`. The length of this // object is increased. virtual void PushBackFromIndex(const AudioMultiVector& append_this, size_t index); - // Removes |length| elements from the beginning of this object, from each + // Removes `length` elements from the beginning of this object, from each // channel. virtual void PopFront(size_t length); - // Removes |length| elements from the end of this object, from each + // Removes `length` elements from the end of this object, from each // channel. virtual void PopBack(size_t length); - // Reads |length| samples from each channel and writes them interleaved to - // |destination|. The total number of elements written to |destination| is - // returned, i.e., |length| * number of channels. If the AudioMultiVector - // contains less than |length| samples per channel, this is reflected in the + // Reads `length` samples from each channel and writes them interleaved to + // `destination`. The total number of elements written to `destination` is + // returned, i.e., `length` * number of channels. If the AudioMultiVector + // contains less than `length` samples per channel, this is reflected in the // return value. virtual size_t ReadInterleaved(size_t length, int16_t* destination) const; - // Like ReadInterleaved() above, but reads from |start_index| instead of from + // Like ReadInterleaved() above, but reads from `start_index` instead of from // the beginning.
virtual size_t ReadInterleavedFromIndex(size_t start_index, size_t length, @@ -89,18 +91,18 @@ class AudioMultiVector { int16_t* destination) const; // Overwrites each channel in this AudioMultiVector with values taken from - // |insert_this|. The values are taken from the beginning of |insert_this| and - // are inserted starting at |position|. |length| values are written into each - // channel. If |length| and |position| are selected such that the new data + // `insert_this`. The values are taken from the beginning of `insert_this` and + // are inserted starting at `position`. `length` values are written into each + // channel. If `length` and `position` are selected such that the new data // extends beyond the end of the current AudioVector, the vector is extended - // to accommodate the new data. |length| is limited to the length of - // |insert_this|. + // to accommodate the new data. `length` is limited to the length of + // `insert_this`. virtual void OverwriteAt(const AudioMultiVector& insert_this, size_t length, size_t position); - // Appends |append_this| to the end of the current vector. Lets the two - // vectors overlap by |fade_length| samples (per channel), and cross-fade + // Appends `append_this` to the end of the current vector. Lets the two + // vectors overlap by `fade_length` samples (per channel), and cross-fade // linearly in this region. virtual void CrossFade(const AudioMultiVector& append_this, size_t fade_length); @@ -111,14 +113,14 @@ class AudioMultiVector { // Returns the number of elements per channel in this AudioMultiVector. virtual size_t Size() const; - // Verify that each channel can hold at least |required_size| elements. If + // Verify that each channel can hold at least `required_size` elements. If // not, extend accordingly. virtual void AssertSize(size_t required_size); virtual bool Empty() const; // Copies the data between two channels in the AudioMultiVector. The method - // does not add any new channel. Thus, |from_channel| and |to_channel| must + // does not add any new channel. Thus, `from_channel` and `to_channel` must // both be valid channel numbers. virtual void CopyChannel(size_t from_channel, size_t to_channel); @@ -130,9 +132,6 @@ class AudioMultiVector { protected: std::vector<AudioVector*> channels_; size_t num_channels_; - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(AudioMultiVector); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/audio_multi_vector_unittest.cc b/modules/audio_coding/neteq/audio_multi_vector_unittest.cc index d1351d8210..329377a18e 100644 --- a/modules/audio_coding/neteq/audio_multi_vector_unittest.cc +++ b/modules/audio_coding/neteq/audio_multi_vector_unittest.cc @@ -94,7 +94,7 @@ TEST_P(AudioMultiVectorTest, PushBackInterleavedAndCopy) { AudioMultiVector vec(num_channels_); vec.PushBackInterleaved(array_interleaved_); AudioMultiVector vec_copy(num_channels_); - vec.CopyTo(&vec_copy); // Copy from |vec| to |vec_copy|. + vec.CopyTo(&vec_copy); // Copy from `vec` to `vec_copy`. ASSERT_EQ(num_channels_, vec.Channels()); ASSERT_EQ(array_length(), vec.Size()); ASSERT_EQ(num_channels_, vec_copy.Channels()); @@ -106,7 +106,7 @@ TEST_P(AudioMultiVectorTest, PushBackInterleavedAndCopy) { } } - // Clear |vec| and verify that it is empty. + // Clear `vec` and verify that it is empty. vec.Clear(); EXPECT_TRUE(vec.Empty()); @@ -208,7 +208,7 @@ TEST_P(AudioMultiVectorTest, PopFront) { vec.PushBackInterleaved(array_interleaved_); vec.PopFront(1); // Remove one element from each channel.
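The interleaving convention used by PushBackInterleaved() and by the tests here places element i of channel c at index i * channels + c. A standalone sketch of the de-interleaving loop, independent of the real classes:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Splits an interleaved buffer into per-channel vectors. Element i of
// channel c is read from interleaved[i * channels + c].
std::vector<std::vector<int16_t>> Deinterleave(
    const std::vector<int16_t>& interleaved, size_t channels) {
  const size_t frames = interleaved.size() / channels;
  std::vector<std::vector<int16_t>> out(channels,
                                        std::vector<int16_t>(frames));
  for (size_t c = 0; c < channels; ++c) {
    for (size_t i = 0; i < frames; ++i)
      out[c][i] = interleaved[i * channels + c];
  }
  return out;
}
```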
ASSERT_EQ(array_length() - 1u, vec.Size()); - // Let |ptr| point to the second element of the first channel in the + // Let `ptr` point to the second element of the first channel in the // interleaved array. int16_t* ptr = &array_interleaved_[num_channels_]; for (size_t i = 0; i < array_length() - 1; ++i) { @@ -227,7 +227,7 @@ TEST_P(AudioMultiVectorTest, PopBack) { vec.PushBackInterleaved(array_interleaved_); vec.PopBack(1); // Remove one element from each channel. ASSERT_EQ(array_length() - 1u, vec.Size()); - // Let |ptr| point to the first element of the first channel in the + // Let `ptr` point to the first element of the first channel in the // interleaved array. int16_t* ptr = array_interleaved_.data(); for (size_t i = 0; i < array_length() - 1; ++i) { diff --git a/modules/audio_coding/neteq/audio_vector.cc b/modules/audio_coding/neteq/audio_vector.cc index 5e435e944d..10e8936447 100644 --- a/modules/audio_coding/neteq/audio_vector.cc +++ b/modules/audio_coding/neteq/audio_vector.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/audio_vector.h" -#include #include #include @@ -246,14 +245,14 @@ void AudioVector::OverwriteAt(const int16_t* insert_this, void AudioVector::CrossFade(const AudioVector& append_this, size_t fade_length) { - // Fade length cannot be longer than the current vector or |append_this|. + // Fade length cannot be longer than the current vector or `append_this`. RTC_DCHECK_LE(fade_length, Size()); RTC_DCHECK_LE(fade_length, append_this.Size()); fade_length = std::min(fade_length, Size()); fade_length = std::min(fade_length, append_this.Size()); size_t position = Size() - fade_length + begin_index_; // Cross fade the overlapping regions. - // |alpha| is the mixing factor in Q14. + // `alpha` is the mixing factor in Q14. // TODO(hlundin): Consider skipping +1 in the denominator to produce a // smoother cross-fade, in particular at the end of the fade. int alpha_step = 16384 / (static_cast<int>(fade_length) + 1); @@ -266,7 +265,7 @@ void AudioVector::CrossFade(const AudioVector& append_this, 14; } RTC_DCHECK_GE(alpha, 0); // Verify that the slope was correct. - // Append what is left of |append_this|. + // Append what is left of `append_this`. size_t samples_to_push_back = append_this.Size() - fade_length; if (samples_to_push_back > 0) PushBack(append_this, samples_to_push_back, fade_length); @@ -287,8 +286,8 @@ void AudioVector::Reserve(size_t n) { return; const size_t length = Size(); // Reserve one more sample to remove the ambiguity between empty vector and - // full vector. Therefore |begin_index_| == |end_index_| indicates empty - // vector, and |begin_index_| == (|end_index_| + 1) % capacity indicates + // full vector. Therefore `begin_index_` == `end_index_` indicates empty + // vector, and `begin_index_` == (`end_index_` + 1) % capacity indicates // full vector. std::unique_ptr<int16_t[]> temp_array(new int16_t[n + 1]); CopyTo(length, 0, temp_array.get()); diff --git a/modules/audio_coding/neteq/audio_vector.h b/modules/audio_coding/neteq/audio_vector.h index a257586da4..d68f3ec6be 100644 --- a/modules/audio_coding/neteq/audio_vector.h +++ b/modules/audio_coding/neteq/audio_vector.h @@ -17,7 +17,6 @@ #include #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -31,30 +30,33 @@ class AudioVector { virtual ~AudioVector(); + AudioVector(const AudioVector&) = delete; + AudioVector& operator=(const AudioVector&) = delete; + // Deletes all values and makes the vector empty.
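The Reserve() comment above describes the classic one-spare-slot ring buffer trick: with one extra slot, the empty and full states are never ambiguous. A standalone sketch of the predicates that convention implies; RingIndices is illustrative, not the real class layout:

```cpp
#include <cstddef>

// One spare slot makes begin == end mean "empty" and
// begin == (end + 1) % capacity mean "full".
struct RingIndices {
  size_t begin, end, capacity;  // capacity includes the spare slot
  bool Empty() const { return begin == end; }
  bool Full() const { return begin == (end + 1) % capacity; }
  size_t Size() const { return (end + capacity - begin) % capacity; }
};
```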
virtual void Clear(); - // Copies all values from this vector to |copy_to|. Any contents in |copy_to| + // Copies all values from this vector to `copy_to`. Any contents in `copy_to` // are deleted before the copy operation. After the operation is done, - // |copy_to| will be an exact replica of this object. + // `copy_to` will be an exact replica of this object. virtual void CopyTo(AudioVector* copy_to) const; - // Copies |length| values from |position| in this vector to |copy_to|. + // Copies `length` values from `position` in this vector to `copy_to`. virtual void CopyTo(size_t length, size_t position, int16_t* copy_to) const; - // Prepends the contents of AudioVector |prepend_this| to this object. The - // length of this object is increased with the length of |prepend_this|. + // Prepends the contents of AudioVector `prepend_this` to this object. The + // length of this object is increased with the length of `prepend_this`. virtual void PushFront(const AudioVector& prepend_this); - // Same as above, but with an array |prepend_this| with |length| elements as + // Same as above, but with an array `prepend_this` with `length` elements as // source. virtual void PushFront(const int16_t* prepend_this, size_t length); // Same as PushFront but will append to the end of this object. virtual void PushBack(const AudioVector& append_this); - // Appends a segment of |append_this| to the end of this object. The segment - // starts from |position| and has |length| samples. + // Appends a segment of `append_this` to the end of this object. The segment + // starts from `position` and has `length` samples. virtual void PushBack(const AudioVector& append_this, size_t length, size_t position); @@ -62,47 +64,47 @@ class AudioVector { // Same as PushFront but will append to the end of this object. virtual void PushBack(const int16_t* append_this, size_t length); - // Removes |length| elements from the beginning of this object. + // Removes `length` elements from the beginning of this object. virtual void PopFront(size_t length); - // Removes |length| elements from the end of this object. + // Removes `length` elements from the end of this object. virtual void PopBack(size_t length); - // Extends this object with |extra_length| elements at the end. The new + // Extends this object with `extra_length` elements at the end. The new // elements are initialized to zero. virtual void Extend(size_t extra_length); - // Inserts |length| elements taken from the array |insert_this| and insert - // them at |position|. The length of the AudioVector is increased by |length|. - // |position| = 0 means that the new values are prepended to the vector. - // |position| = Size() means that the new values are appended to the vector. + // Inserts `length` elements taken from the array `insert_this` and insert + // them at `position`. The length of the AudioVector is increased by `length`. + // `position` = 0 means that the new values are prepended to the vector. + // `position` = Size() means that the new values are appended to the vector. virtual void InsertAt(const int16_t* insert_this, size_t length, size_t position); - // Like InsertAt, but inserts |length| zero elements at |position|. + // Like InsertAt, but inserts `length` zero elements at `position`. virtual void InsertZerosAt(size_t length, size_t position); - // Overwrites |length| elements of this AudioVector starting from |position| - // with first values in |AudioVector|. The definition of |position| - // is the same as for InsertAt(). 
If |length| and |position| are selected + // Overwrites `length` elements of this AudioVector starting from `position` + // with the first values in `insert_this`. The definition of `position` + // is the same as for InsertAt(). If `length` and `position` are selected // such that the new data extends beyond the end of the current AudioVector, // the vector is extended to accommodate the new data. virtual void OverwriteAt(const AudioVector& insert_this, size_t length, size_t position); - // Overwrites |length| elements of this AudioVector with values taken from the - // array |insert_this|, starting at |position|. The definition of |position| - // is the same as for InsertAt(). If |length| and |position| are selected + // Overwrites `length` elements of this AudioVector with values taken from the + // array `insert_this`, starting at `position`. The definition of `position` + // is the same as for InsertAt(). If `length` and `position` are selected // such that the new data extends beyond the end of the current AudioVector, // the vector is extended to accommodate the new data. virtual void OverwriteAt(const int16_t* insert_this, size_t length, size_t position); - // Appends |append_this| to the end of the current vector. Lets the two - // vectors overlap by |fade_length| samples, and cross-fade linearly in this + // Appends `append_this` to the end of the current vector. Lets the two + // vectors overlap by `fade_length` samples, and cross-fade linearly in this // region. virtual void CrossFade(const AudioVector& append_this, size_t fade_length); @@ -158,14 +160,12 @@ class AudioVector { size_t capacity_; // Allocated number of samples in the array. - // The index of the first sample in |array_|, except when + // The index of the first sample in `array_`, except when // |begin_index_ == end_index_|, which indicates an empty buffer. size_t begin_index_; - // The index of the sample after the last sample in |array_|. + // The index of the sample after the last sample in `array_`. size_t end_index_; - - RTC_DISALLOW_COPY_AND_ASSIGN(AudioVector); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/audio_vector_unittest.cc b/modules/audio_coding/neteq/audio_vector_unittest.cc index e39774c4c1..ae9dd88606 100644 --- a/modules/audio_coding/neteq/audio_vector_unittest.cc +++ b/modules/audio_coding/neteq/audio_vector_unittest.cc @@ -62,7 +62,7 @@ TEST_F(AudioVectorTest, PushBackAndCopy) { AudioVector vec; AudioVector vec_copy; vec.PushBack(array_, array_length()); - vec.CopyTo(&vec_copy); // Copy from |vec| to |vec_copy|. + vec.CopyTo(&vec_copy); // Copy from `vec` to `vec_copy`. ASSERT_EQ(array_length(), vec.Size()); ASSERT_EQ(array_length(), vec_copy.Size()); for (size_t i = 0; i < array_length(); ++i) { @@ -70,7 +70,7 @@ TEST_F(AudioVectorTest, PushBackAndCopy) { EXPECT_EQ(array_[i], vec_copy[i]); } - // Clear |vec| and verify that it is empty. + // Clear `vec` and verify that it is empty. vec.Clear(); EXPECT_TRUE(vec.Empty()); @@ -178,8 +178,8 @@ TEST_F(AudioVectorTest, InsertAt) { int insert_position = 5; vec.InsertAt(new_array, kNewLength, insert_position); // Verify that the vector looks as follows: - // {0, 1, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1, - // |insert_position|, |insert_position| + 1, ..., kLength - 1}. + // {0, 1, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1, + // `insert_position`, `insert_position` + 1, ..., kLength - 1}.
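One detail behind the `begin_index_`/`end_index_` comments above deserves a note: Reserve() in audio_vector.cc (earlier in this diff) allocates one sample more than requested so that an empty buffer and a full buffer stay distinguishable. A hypothetical helper, not part of the patch, shows the invariant:

// With a circular array of n = capacity + 1 slots:
//   begin_index == end_index            -> empty,
//   (end_index + 1) % n == begin_index  -> full (capacity samples stored).
size_t RingBufferSize(size_t begin_index, size_t end_index, size_t n) {
  return (end_index + n - begin_index) % n;
}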
size_t pos = 0; for (int i = 0; i < insert_position; ++i) { EXPECT_EQ(array_[i], vec[pos]); @@ -309,8 +309,8 @@ TEST_F(AudioVectorTest, OverwriteAt) { size_t insert_position = 2; vec.OverwriteAt(new_array, kNewLength, insert_position); // Verify that the vector looks as follows: - // {0, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1, - // |insert_position|, |insert_position| + 1, ..., kLength - 1}. + // {0, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1, + // `insert_position`, `insert_position` + 1, ..., kLength - 1}. size_t pos = 0; for (pos = 0; pos < insert_position; ++pos) { EXPECT_EQ(array_[pos], vec[pos]); @@ -340,8 +340,8 @@ TEST_F(AudioVectorTest, OverwriteBeyondEnd) { vec.OverwriteAt(new_array, kNewLength, insert_position); ASSERT_EQ(array_length() - 2u + kNewLength, vec.Size()); // Verify that the vector looks as follows: - // {0, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1, - // |insert_position|, |insert_position| + 1, ..., kLength - 1}. + // {0, ..., `insert_position` - 1, 100, 101, ..., 100 + kNewLength - 1, + // `insert_position`, `insert_position` + 1, ..., kLength - 1}. int pos = 0; for (pos = 0; pos < insert_position; ++pos) { EXPECT_EQ(array_[pos], vec[pos]); @@ -350,7 +350,7 @@ TEST_F(AudioVectorTest, OverwriteBeyondEnd) { EXPECT_EQ(new_array[i], vec[pos]); ++pos; } - // Verify that we checked to the end of |vec|. + // Verify that we checked to the end of `vec`. EXPECT_EQ(vec.Size(), static_cast(pos)); } @@ -359,7 +359,7 @@ TEST_F(AudioVectorTest, CrossFade) { static const size_t kFadeLength = 10; AudioVector vec1(kLength); AudioVector vec2(kLength); - // Set all vector elements to 0 in |vec1| and 100 in |vec2|. + // Set all vector elements to 0 in `vec1` and 100 in `vec2`. for (size_t i = 0; i < kLength; ++i) { vec1[i] = 0; vec2[i] = 100; diff --git a/modules/audio_coding/neteq/background_noise.cc b/modules/audio_coding/neteq/background_noise.cc index ae4645c78e..2c95d3b390 100644 --- a/modules/audio_coding/neteq/background_noise.cc +++ b/modules/audio_coding/neteq/background_noise.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/background_noise.h" -#include #include // memcpy #include // min, max @@ -109,8 +108,8 @@ bool BackgroundNoise::Update(const AudioMultiVector& input, if ((sample_energy > 0) && (int64_t{5} * residual_energy >= int64_t{16} * sample_energy)) { // Spectrum is flat enough; save filter parameters. - // |temp_signal| + |kVecLen| - |kMaxLpcOrder| points at the first of the - // |kMaxLpcOrder| samples in the residual signal, which will form the + // `temp_signal` + `kVecLen` - `kMaxLpcOrder` points at the first of the + // `kMaxLpcOrder` samples in the residual signal, which will form the // filter state for the next noise generation. SaveParameters(channel_ix, lpc_coefficients, temp_signal + kVecLen - kMaxLpcOrder, sample_energy, @@ -118,7 +117,7 @@ bool BackgroundNoise::Update(const AudioMultiVector& input, filter_params_saved = true; } } else { - // Will only happen if post-decode VAD is disabled and |sample_energy| is + // Will only happen if post-decode VAD is disabled and `sample_energy` is // not low enough. Increase the threshold for update so that it increases // by a factor 4 in 4 seconds. IncrementEnergyThreshold(channel_ix, sample_energy); @@ -265,8 +264,8 @@ void BackgroundNoise::IncrementEnergyThreshold(size_t channel, parameters.max_energy = sample_energy; } - // Set |energy_update_threshold| to no less than 60 dB lower than - // |max_energy_|. 
Adding 524288 assures proper rounding. + // Set `energy_update_threshold` to no less than 60 dB lower than + // `max_energy_`. Adding 524288 assures proper rounding. int32_t energy_update_threshold = (parameters.max_energy + 524288) >> 20; if (energy_update_threshold > parameters.energy_update_threshold) { parameters.energy_update_threshold = energy_update_threshold; @@ -298,9 +297,9 @@ void BackgroundNoise::SaveParameters(size_t channel, // Calculate scale and shift factor. parameters.scale = static_cast(WebRtcSpl_SqrtFloor(residual_energy)); - // Add 13 to the |scale_shift_|, since the random numbers table is in + // Add 13 to the `scale_shift_`, since the random numbers table is in // Q13. - // TODO(hlundin): Move the "13" to where the |scale_shift_| is used? + // TODO(hlundin): Move the "13" to where the `scale_shift_` is used? parameters.scale_shift = static_cast(13 + ((kLogResidualLength + norm_shift) / 2)); diff --git a/modules/audio_coding/neteq/background_noise.h b/modules/audio_coding/neteq/background_noise.h index 631db0d99f..8e6d5890a0 100644 --- a/modules/audio_coding/neteq/background_noise.h +++ b/modules/audio_coding/neteq/background_noise.h @@ -16,7 +16,6 @@ #include #include "api/array_view.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -34,15 +33,18 @@ class BackgroundNoise { explicit BackgroundNoise(size_t num_channels); virtual ~BackgroundNoise(); + BackgroundNoise(const BackgroundNoise&) = delete; + BackgroundNoise& operator=(const BackgroundNoise&) = delete; + void Reset(); // Updates the parameter estimates based on the signal currently in the - // |sync_buffer|, and on the latest decision in |vad| if it is running. + // `sync_buffer`, and on the latest decision in `vad` if it is running. // Returns true if the filter parameters are updated. bool Update(const AudioMultiVector& sync_buffer, const PostDecodeVad& vad); // Generates background noise given a random vector and writes the output to - // |buffer|. + // `buffer`. void GenerateBackgroundNoise(rtc::ArrayView random_vector, size_t channel, int mute_slope, @@ -50,29 +52,29 @@ class BackgroundNoise { size_t num_noise_samples, int16_t* buffer); - // Returns |energy_| for |channel|. + // Returns `energy_` for `channel`. int32_t Energy(size_t channel) const; - // Sets the value of |mute_factor_| for |channel| to |value|. + // Sets the value of `mute_factor_` for `channel` to `value`. void SetMuteFactor(size_t channel, int16_t value); - // Returns |mute_factor_| for |channel|. + // Returns `mute_factor_` for `channel`. int16_t MuteFactor(size_t channel) const; - // Returns a pointer to |filter_| for |channel|. + // Returns a pointer to `filter_` for `channel`. const int16_t* Filter(size_t channel) const; - // Returns a pointer to |filter_state_| for |channel|. + // Returns a pointer to `filter_state_` for `channel`. const int16_t* FilterState(size_t channel) const; - // Copies |input| to the filter state. Will not copy more than |kMaxLpcOrder| + // Copies `input` to the filter state. Will not copy more than `kMaxLpcOrder` // elements. void SetFilterState(size_t channel, rtc::ArrayView input); - // Returns |scale_| for |channel|. + // Returns `scale_` for `channel`. int16_t Scale(size_t channel) const; - // Returns |scale_shift_| for |channel|. + // Returns `scale_shift_` for `channel`. int16_t ScaleShift(size_t channel) const; // Accessors. 
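The "60 dB" in IncrementEnergyThreshold() above follows directly from the shift amount. A worked restatement (the helper name is invented; the arithmetic is the patch's):

// Right-shifting by 20 divides the energy by 2^20 = 1048576, and
// 10 * log10(2^20) ~= 60.2 dB. Adding 524288 = 2^19, half the divisor,
// turns the truncating shift into round-to-nearest.
int32_t EnergyThresholdSketch(int32_t max_energy) {
  return (max_energy + (1 << 19)) >> 20;
}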
@@ -117,7 +119,7 @@ class BackgroundNoise { size_t length, int32_t* auto_correlation) const; - // Increments the energy threshold by a factor 1 + |kThresholdIncrement|. + // Increments the energy threshold by a factor 1 + `kThresholdIncrement`. void IncrementEnergyThreshold(size_t channel, int32_t sample_energy); // Updates the filter parameters. @@ -130,8 +132,6 @@ class BackgroundNoise { size_t num_channels_; std::unique_ptr channel_parameters_; bool initialized_; - - RTC_DISALLOW_COPY_AND_ASSIGN(BackgroundNoise); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/buffer_level_filter.cc b/modules/audio_coding/neteq/buffer_level_filter.cc index 8901c01f77..0ccc7bb53d 100644 --- a/modules/audio_coding/neteq/buffer_level_filter.cc +++ b/modules/audio_coding/neteq/buffer_level_filter.cc @@ -30,10 +30,10 @@ void BufferLevelFilter::Reset() { void BufferLevelFilter::Update(size_t buffer_size_samples, int time_stretched_samples) { // Filter: - // |filtered_current_level_| = |level_factor_| * |filtered_current_level_| + - // (1 - |level_factor_|) * |buffer_size_samples| - // |level_factor_| and |filtered_current_level_| are in Q8. - // |buffer_size_samples| is in Q0. + // `filtered_current_level_` = `level_factor_` * `filtered_current_level_` + + // (1 - `level_factor_`) * `buffer_size_samples` + // `level_factor_` and `filtered_current_level_` are in Q8. + // `buffer_size_samples` is in Q0. const int64_t filtered_current_level = (level_factor_ * int64_t{filtered_current_level_} >> 8) + (256 - level_factor_) * rtc::dchecked_cast(buffer_size_samples); diff --git a/modules/audio_coding/neteq/buffer_level_filter.h b/modules/audio_coding/neteq/buffer_level_filter.h index 218a142648..ced36da9c2 100644 --- a/modules/audio_coding/neteq/buffer_level_filter.h +++ b/modules/audio_coding/neteq/buffer_level_filter.h @@ -14,18 +14,20 @@ #include #include -#include "rtc_base/constructor_magic.h" - namespace webrtc { class BufferLevelFilter { public: BufferLevelFilter(); virtual ~BufferLevelFilter() {} + + BufferLevelFilter(const BufferLevelFilter&) = delete; + BufferLevelFilter& operator=(const BufferLevelFilter&) = delete; + virtual void Reset(); - // Updates the filter. Current buffer size is |buffer_size_samples|. - // |time_stretched_samples| is subtracted from the filtered value (thus + // Updates the filter. Current buffer size is `buffer_size_samples`. + // `time_stretched_samples` is subtracted from the filtered value (thus // bypassing the filter operation). virtual void Update(size_t buffer_size_samples, int time_stretched_samples); @@ -46,8 +48,6 @@ class BufferLevelFilter { private: int level_factor_; // Filter factor for the buffer level filter in Q8. int filtered_current_level_; // Filtered current buffer level in Q8. - - RTC_DISALLOW_COPY_AND_ASSIGN(BufferLevelFilter); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/buffer_level_filter_unittest.cc b/modules/audio_coding/neteq/buffer_level_filter_unittest.cc index 63fc83be67..6773e96f58 100644 --- a/modules/audio_coding/neteq/buffer_level_filter_unittest.cc +++ b/modules/audio_coding/neteq/buffer_level_filter_unittest.cc @@ -38,7 +38,7 @@ TEST(BufferLevelFilter, ConvergenceTest) { filter.Update(value, 0 /* time_stretched_samples */); } // Expect the filtered value to be (theoretically) - // (1 - (251/256) ^ |times|) * |value|. + // (1 - (251/256) ^ `times`) * `value`. 
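The expected values in this test follow from the recursion in BufferLevelFilter::Update() shown earlier in the diff. A float sketch (the production filter keeps its state in Q8 fixed point, and this helper is not part of the patch):

// One smoothing step: filtered' = f * filtered + (1 - f) * value.
// Starting from 0 with a constant input, n steps yield (1 - f^n) * value,
// with f = 251/256 for the default level factor used here.
double FilterStepSketch(double filtered, double value, double f) {
  return f * filtered + (1.0 - f) * value;
}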
double expected_value_double = (1 - pow(251.0 / 256.0, times)) * value; int expected_value = static_cast(expected_value_double); @@ -62,7 +62,7 @@ TEST(BufferLevelFilter, FilterFactor) { filter.Update(kValue, 0 /* time_stretched_samples */); } // Expect the filtered value to be - // (1 - (252/256) ^ |kTimes|) * |kValue|. + // (1 - (252/256) ^ `kTimes`) * `kValue`. int expected_value = 15; EXPECT_EQ(expected_value, filter.filtered_current_level()); @@ -72,7 +72,7 @@ TEST(BufferLevelFilter, FilterFactor) { filter.Update(kValue, 0 /* time_stretched_samples */); } // Expect the filtered value to be - // (1 - (253/256) ^ |kTimes|) * |kValue|. + // (1 - (253/256) ^ `kTimes`) * `kValue`. expected_value = 11; EXPECT_EQ(expected_value, filter.filtered_current_level()); @@ -82,7 +82,7 @@ TEST(BufferLevelFilter, FilterFactor) { filter.Update(kValue, 0 /* time_stretched_samples */); } // Expect the filtered value to be - // (1 - (254/256) ^ |kTimes|) * |kValue|. + // (1 - (254/256) ^ `kTimes`) * `kValue`. expected_value = 8; EXPECT_EQ(expected_value, filter.filtered_current_level()); } @@ -98,13 +98,13 @@ TEST(BufferLevelFilter, TimeStretchedSamples) { filter.Update(kValue, 0); } // Expect the filtered value to be - // (1 - (251/256) ^ |kTimes|) * |kValue|. + // (1 - (251/256) ^ `kTimes`) * `kValue`. const int kExpectedValue = 18; EXPECT_EQ(kExpectedValue, filter.filtered_current_level()); // Update filter again, now with non-zero value for packet length. // Set the current filtered value to be the input, in order to isolate the - // impact of |kTimeStretchedSamples|. + // impact of `kTimeStretchedSamples`. filter.Update(filter.filtered_current_level(), kTimeStretchedSamples); EXPECT_EQ(kExpectedValue - kTimeStretchedSamples, filter.filtered_current_level()); diff --git a/modules/audio_coding/neteq/comfort_noise.cc b/modules/audio_coding/neteq/comfort_noise.cc index b02e3d747f..a2ce888f45 100644 --- a/modules/audio_coding/neteq/comfort_noise.cc +++ b/modules/audio_coding/neteq/comfort_noise.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/comfort_noise.h" -#include #include #include @@ -120,8 +119,8 @@ int ComfortNoise::Generate(size_t requested_length, AudioMultiVector* output) { muting_window += muting_window_increment; unmuting_window += unmuting_window_increment; } - // Remove |overlap_length_| samples from the front of |output| since they - // were mixed into |sync_buffer_| above. + // Remove `overlap_length_` samples from the front of `output` since they + // were mixed into `sync_buffer_` above. output->PopFront(overlap_length_); } first_call_ = false; diff --git a/modules/audio_coding/neteq/comfort_noise.h b/modules/audio_coding/neteq/comfort_noise.h index f748772710..31fcee31d0 100644 --- a/modules/audio_coding/neteq/comfort_noise.h +++ b/modules/audio_coding/neteq/comfort_noise.h @@ -13,8 +13,6 @@ #include -#include "rtc_base/constructor_magic.h" - namespace webrtc { // Forward declarations. @@ -42,14 +40,17 @@ class ComfortNoise { decoder_database_(decoder_database), sync_buffer_(sync_buffer) {} + ComfortNoise(const ComfortNoise&) = delete; + ComfortNoise& operator=(const ComfortNoise&) = delete; + // Resets the state. Should be called before each new comfort noise period. void Reset(); - // Update the comfort noise generator with the parameters in |packet|. + // Update the comfort noise generator with the parameters in `packet`. int UpdateParameters(const Packet& packet); - // Generates |requested_length| samples of comfort noise and writes to - // |output|. 
If this is the first in call after Reset (or first after creating + // Generates `requested_length` samples of comfort noise and writes to + // `output`. If this is the first in call after Reset (or first after creating // the object), it will also mix in comfort noise at the end of the // SyncBuffer object provided in the constructor. int Generate(size_t requested_length, AudioMultiVector* output); @@ -65,7 +66,6 @@ class ComfortNoise { DecoderDatabase* decoder_database_; SyncBuffer* sync_buffer_; int internal_error_code_; - RTC_DISALLOW_COPY_AND_ASSIGN(ComfortNoise); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/comfort_noise_unittest.cc b/modules/audio_coding/neteq/comfort_noise_unittest.cc index b3fbb4eb06..b436800061 100644 --- a/modules/audio_coding/neteq/comfort_noise_unittest.cc +++ b/modules/audio_coding/neteq/comfort_noise_unittest.cc @@ -23,7 +23,7 @@ TEST(ComfortNoise, CreateAndDestroy) { MockDecoderDatabase db; SyncBuffer sync_buffer(1, 1000); ComfortNoise cn(fs, &db, &sync_buffer); - EXPECT_CALL(db, Die()); // Called when |db| goes out of scope. + EXPECT_CALL(db, Die()); // Called when `db` goes out of scope. } // TODO(hlundin): Write more tests. diff --git a/modules/audio_coding/neteq/cross_correlation.h b/modules/audio_coding/neteq/cross_correlation.h index 9ce8be83ae..5082ce6a30 100644 --- a/modules/audio_coding/neteq/cross_correlation.h +++ b/modules/audio_coding/neteq/cross_correlation.h @@ -17,19 +17,19 @@ namespace webrtc { // The function calculates the cross-correlation between two sequences -// |sequence_1| and |sequence_2|. |sequence_1| is taken as reference, with -// |sequence_1_length| as its length. |sequence_2| slides for the calculation of -// cross-correlation. The result will be saved in |cross_correlation|. -// |cross_correlation_length| correlation points are calculated. +// `sequence_1` and `sequence_2`. `sequence_1` is taken as reference, with +// `sequence_1_length` as its length. `sequence_2` slides for the calculation of +// cross-correlation. The result will be saved in `cross_correlation`. +// `cross_correlation_length` correlation points are calculated. // The corresponding lag starts from 0, and increases with a step of -// |cross_correlation_step|. The result is without normalization. To avoid +// `cross_correlation_step`. The result is without normalization. To avoid // overflow, the result will be right shifted. The amount of shifts will be // returned. // // Input: // - sequence_1 : First sequence (reference). // - sequence_2 : Second sequence (sliding during calculation). -// - sequence_1_length : Length of |sequence_1|. +// - sequence_1_length : Length of `sequence_1`. // - cross_correlation_length : Number of cross-correlations to calculate. // - cross_correlation_step : Step in the lag for the cross-correlation. 
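A naive reference for the behavior documented above may be useful; this sketch is illustrative only. It accumulates in 64 bits instead of doing the right-shift-to-avoid-overflow handling of the real function, so it also omits the returned shift count.

#include <cstddef>
#include <cstdint>

// cross_correlation[i] = sum over n of
//     sequence_1[n] * sequence_2[n + i * cross_correlation_step],
// for i in [0, cross_correlation_length). The caller must ensure that
// sequence_2 holds enough samples for the largest lag.
void CrossCorrelationSketch(const int16_t* sequence_1,
                            const int16_t* sequence_2,
                            size_t sequence_1_length,
                            size_t cross_correlation_length,
                            int cross_correlation_step,
                            int64_t* cross_correlation) {
  for (size_t i = 0; i < cross_correlation_length; ++i) {
    const int16_t* lagged = sequence_2 + i * cross_correlation_step;
    int64_t sum = 0;
    for (size_t n = 0; n < sequence_1_length; ++n)
      sum += static_cast<int32_t>(sequence_1[n]) * lagged[n];
    cross_correlation[i] = sum;
  }
}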
// diff --git a/modules/audio_coding/neteq/decision_logic.cc b/modules/audio_coding/neteq/decision_logic.cc index d702729881..30463fcc49 100644 --- a/modules/audio_coding/neteq/decision_logic.cc +++ b/modules/audio_coding/neteq/decision_logic.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/decision_logic.h" -#include #include #include @@ -23,21 +22,28 @@ #include "rtc_base/numerics/safe_conversions.h" #include "system_wrappers/include/field_trial.h" +namespace webrtc { + namespace { constexpr int kPostponeDecodingLevel = 50; constexpr int kDefaultTargetLevelWindowMs = 100; constexpr int kDecelerationTargetLevelOffsetMs = 85; -} // namespace +std::unique_ptr CreateDelayManager( + const NetEqController::Config& neteq_config) { + DelayManager::Config config; + config.max_packets_in_buffer = neteq_config.max_packets_in_buffer; + config.base_minimum_delay_ms = neteq_config.base_min_delay_ms; + config.Log(); + return std::make_unique(config, neteq_config.tick_timer); +} -namespace webrtc { +} // namespace DecisionLogic::DecisionLogic(NetEqController::Config config) : DecisionLogic(config, - DelayManager::Create(config.max_packets_in_buffer, - config.base_min_delay_ms, - config.tick_timer), + CreateDelayManager(config), std::make_unique()) {} DecisionLogic::DecisionLogic( @@ -310,8 +316,8 @@ NetEq::Operation DecisionLogic::ExpectedPacketAvailable(NetEq::Mode prev_mode, std::max(target_level_samples * 3 / 4, target_level_samples - kDecelerationTargetLevelOffsetMs * samples_per_ms); - // |higher_limit| is equal to |target_level|, but should at - // least be 20 ms higher than |lower_limit|. + // `higher_limit` is equal to `target_level`, but should at + // least be 20 ms higher than `lower_limit`. const int high_limit = std::max(target_level_samples, low_limit + 20 * samples_per_ms); diff --git a/modules/audio_coding/neteq/decision_logic.h b/modules/audio_coding/neteq/decision_logic.h index 8be4511419..a8571ade96 100644 --- a/modules/audio_coding/neteq/decision_logic.h +++ b/modules/audio_coding/neteq/decision_logic.h @@ -18,7 +18,6 @@ #include "api/neteq/tick_timer.h" #include "modules/audio_coding/neteq/buffer_level_filter.h" #include "modules/audio_coding/neteq/delay_manager.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/experiments/field_trial_parser.h" namespace webrtc { @@ -37,6 +36,9 @@ class DecisionLogic : public NetEqController { ~DecisionLogic() override; + DecisionLogic(const DecisionLogic&) = delete; + DecisionLogic& operator=(const DecisionLogic&) = delete; + // Resets object to a clean state. void Reset() override; @@ -47,23 +49,23 @@ class DecisionLogic : public NetEqController { void SetSampleRate(int fs_hz, size_t output_size_samples) override; // Given info about the latest received packet, and current jitter buffer - // status, returns the operation. |target_timestamp| and |expand_mutefactor| - // are provided for reference. |last_packet_samples| is the number of samples + // status, returns the operation. `target_timestamp` and `expand_mutefactor` + // are provided for reference. `last_packet_samples` is the number of samples // obtained from the last decoded frame. If there is a packet available, it - // should be supplied in |packet|; otherwise it should be NULL. The mode + // should be supplied in `packet`; otherwise it should be NULL. The mode // resulting from the last call to NetEqImpl::GetAudio is supplied in - // |last_mode|. If there is a DTMF event to play, |play_dtmf| should be set to - // true. 
The output variable |reset_decoder| will be set to true if a reset is + // `last_mode`. If there is a DTMF event to play, `play_dtmf` should be set to + // true. The output variable `reset_decoder` will be set to true if a reset is // required; otherwise it is left unchanged (i.e., it can remain true if it // was true before the call). NetEq::Operation GetDecision(const NetEqController::NetEqStatus& status, bool* reset_decoder) override; - // These methods test the |cng_state_| for different conditions. + // These methods test the `cng_state_` for different conditions. bool CngRfc3389On() const override { return cng_state_ == kCngRfc3389On; } bool CngOff() const override { return cng_state_ == kCngOff; } - // Resets the |cng_state_| to kCngOff. + // Resets the `cng_state_` to kCngOff. void SetCngOff() override { cng_state_ = kCngOff; } // Reports back to DecisionLogic whether the decision to do expand remains or @@ -72,7 +74,7 @@ class DecisionLogic : public NetEqController { // sync buffer. void ExpandDecision(NetEq::Operation operation) override; - // Adds |value| to |sample_memory_|. + // Adds `value` to `sample_memory_`. void AddSampleMemory(int32_t value) override { sample_memory_ += value; } int TargetLevelMs() const override { return delay_manager_->TargetDelayMs(); } @@ -120,8 +122,8 @@ class DecisionLogic : public NetEqController { enum CngState { kCngOff, kCngRfc3389On, kCngInternalOn }; - // Updates the |buffer_level_filter_| with the current buffer level - // |buffer_size_samples|. + // Updates the `buffer_level_filter_` with the current buffer level + // `buffer_size_samples`. void FilterBufferLevel(size_t buffer_size_samples); // Returns the operation given that the next available packet is a comfort @@ -132,7 +134,7 @@ class DecisionLogic : public NetEqController { size_t generated_noise_samples); // Returns the operation given that no packets are available (except maybe - // a DTMF event, flagged by setting |play_dtmf| true). + // a DTMF event, flagged by setting `play_dtmf` true). virtual NetEq::Operation NoPacket(bool play_dtmf); // Returns the operation to do given that the expected packet is available. @@ -160,13 +162,13 @@ class DecisionLogic : public NetEqController { // Checks if the current (filtered) buffer level is under the target level. bool UnderTargetLevel() const; - // Checks if |timestamp_leap| is so long into the future that a reset due + // Checks if `timestamp_leap` is so long into the future that a reset due // to exceeding kReinitAfterExpands will be done. bool ReinitAfterExpands(uint32_t timestamp_leap) const; // Checks if we still have not done enough expands to cover the distance from // the last decoded packet to the next available packet, the distance beeing - // conveyed in |timestamp_leap|. + // conveyed in `timestamp_leap`. bool PacketTooEarly(uint32_t timestamp_leap) const; // Checks if num_consecutive_expands_ >= kMaxWaitForPacket. 
@@ -192,8 +194,6 @@ class DecisionLogic : public NetEqController { FieldTrialParameter estimate_dtx_delay_; FieldTrialParameter time_stretch_cn_; FieldTrialConstrained target_level_window_ms_; - - RTC_DISALLOW_COPY_AND_ASSIGN(DecisionLogic); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/decision_logic_unittest.cc b/modules/audio_coding/neteq/decision_logic_unittest.cc index fc580350d2..b82165389f 100644 --- a/modules/audio_coding/neteq/decision_logic_unittest.cc +++ b/modules/audio_coding/neteq/decision_logic_unittest.cc @@ -61,11 +61,8 @@ class DecisionLogicTest : public ::testing::Test { NetEqController::Config config; config.tick_timer = &tick_timer_; config.allow_time_stretching = true; - std::unique_ptr histogram = - std::make_unique(200, 12345, 2); auto delay_manager = std::make_unique( - 200, 0, 12300, absl::nullopt, 2000, config.tick_timer, - std::move(histogram)); + DelayManager::Config(), config.tick_timer); mock_delay_manager_ = delay_manager.get(); auto buffer_level_filter = std::make_unique(); mock_buffer_level_filter_ = buffer_level_filter.get(); diff --git a/modules/audio_coding/neteq/decoder_database.cc b/modules/audio_coding/neteq/decoder_database.cc index e755e7bb3c..e9176f41f5 100644 --- a/modules/audio_coding/neteq/decoder_database.cc +++ b/modules/audio_coding/neteq/decoder_database.cc @@ -161,7 +161,7 @@ int DecoderDatabase::RegisterPayload(int rtp_payload_type, rtp_payload_type, DecoderInfo(audio_format, codec_pair_id_, decoder_factory_.get()))); if (ret.second == false) { - // Database already contains a decoder with type |rtp_payload_type|. + // Database already contains a decoder with type `rtp_payload_type`. return kDecoderExists; } return kOK; @@ -169,7 +169,7 @@ int DecoderDatabase::RegisterPayload(int rtp_payload_type, int DecoderDatabase::Remove(uint8_t rtp_payload_type) { if (decoders_.erase(rtp_payload_type) == 0) { - // No decoder with that |rtp_payload_type|. + // No decoder with that `rtp_payload_type`. return kDecoderNotFound; } if (active_decoder_type_ == rtp_payload_type) { @@ -199,7 +199,7 @@ const DecoderDatabase::DecoderInfo* DecoderDatabase::GetDecoderInfo( int DecoderDatabase::SetActiveDecoder(uint8_t rtp_payload_type, bool* new_decoder) { - // Check that |rtp_payload_type| exists in the database. + // Check that `rtp_payload_type` exists in the database. const DecoderInfo* info = GetDecoderInfo(rtp_payload_type); if (!info) { // Decoder not found. @@ -231,7 +231,7 @@ AudioDecoder* DecoderDatabase::GetActiveDecoder() const { } int DecoderDatabase::SetActiveCngDecoder(uint8_t rtp_payload_type) { - // Check that |rtp_payload_type| exists in the database. + // Check that `rtp_payload_type` exists in the database. const DecoderInfo* info = GetDecoderInfo(rtp_payload_type); if (!info) { // Decoder not found. diff --git a/modules/audio_coding/neteq/decoder_database.h b/modules/audio_coding/neteq/decoder_database.h index e0a3fe3509..6c2ce54039 100644 --- a/modules/audio_coding/neteq/decoder_database.h +++ b/modules/audio_coding/neteq/decoder_database.h @@ -20,7 +20,6 @@ #include "api/scoped_refptr.h" #include "modules/audio_coding/codecs/cng/webrtc_cng.h" #include "modules/audio_coding/neteq/packet.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -80,15 +79,15 @@ class DecoderDatabase { // Returns true if the decoder's format is RED. bool IsRed() const { return subtype_ == Subtype::kRed; } - // Returns true if the decoder's format is named |name|. + // Returns true if the decoder's format is named `name`. 
bool IsType(const char* name) const; - // Returns true if the decoder's format is named |name|. + // Returns true if the decoder's format is named `name`. bool IsType(const std::string& name) const; const std::string& get_name() const { return name_; } private: - // TODO(ossu): |name_| is kept here while we retain the old external + // TODO(ossu): `name_` is kept here while we retain the old external // decoder interface. Remove this once using an // AudioDecoderFactory has supplanted the old functionality. const std::string name_; @@ -122,6 +121,9 @@ class DecoderDatabase { virtual ~DecoderDatabase(); + DecoderDatabase(const DecoderDatabase&) = delete; + DecoderDatabase& operator=(const DecoderDatabase&) = delete; + // Returns true if the database is empty. virtual bool Empty() const; @@ -143,26 +145,26 @@ class DecoderDatabase { virtual int RegisterPayload(int rtp_payload_type, const SdpAudioFormat& audio_format); - // Removes the entry for |rtp_payload_type| from the database. + // Removes the entry for `rtp_payload_type` from the database. // Returns kDecoderNotFound or kOK depending on the outcome of the operation. virtual int Remove(uint8_t rtp_payload_type); // Remove all entries. virtual void RemoveAll(); - // Returns a pointer to the DecoderInfo struct for |rtp_payload_type|. If - // no decoder is registered with that |rtp_payload_type|, NULL is returned. + // Returns a pointer to the DecoderInfo struct for `rtp_payload_type`. If + // no decoder is registered with that `rtp_payload_type`, NULL is returned. virtual const DecoderInfo* GetDecoderInfo(uint8_t rtp_payload_type) const; - // Sets the active decoder to be |rtp_payload_type|. If this call results in a - // change of active decoder, |new_decoder| is set to true. The previous active + // Sets the active decoder to be `rtp_payload_type`. If this call results in a + // change of active decoder, `new_decoder` is set to true. The previous active // decoder's AudioDecoder object is deleted. virtual int SetActiveDecoder(uint8_t rtp_payload_type, bool* new_decoder); // Returns the current active decoder, or NULL if no active decoder exists. virtual AudioDecoder* GetActiveDecoder() const; - // Sets the active comfort noise decoder to be |rtp_payload_type|. If this + // Sets the active comfort noise decoder to be `rtp_payload_type`. If this // call results in a change of active comfort noise decoder, the previous // active decoder's AudioDecoder object is deleted. virtual int SetActiveCngDecoder(uint8_t rtp_payload_type); @@ -176,26 +178,26 @@ class DecoderDatabase { // exists. // Returns a pointer to the AudioDecoder object associated with - // |rtp_payload_type|, or NULL if none is registered. If the AudioDecoder + // `rtp_payload_type`, or NULL if none is registered. If the AudioDecoder // object does not exist for that decoder, the object is created. AudioDecoder* GetDecoder(uint8_t rtp_payload_type) const; - // Returns if |rtp_payload_type| is registered with a format named |name|. + // Returns if `rtp_payload_type` is registered with a format named `name`. bool IsType(uint8_t rtp_payload_type, const char* name) const; - // Returns if |rtp_payload_type| is registered with a format named |name|. + // Returns if `rtp_payload_type` is registered with a format named `name`. bool IsType(uint8_t rtp_payload_type, const std::string& name) const; - // Returns true if |rtp_payload_type| is registered as comfort noise. + // Returns true if `rtp_payload_type` is registered as comfort noise. 
bool IsComfortNoise(uint8_t rtp_payload_type) const; - // Returns true if |rtp_payload_type| is registered as DTMF. + // Returns true if `rtp_payload_type` is registered as DTMF. bool IsDtmf(uint8_t rtp_payload_type) const; - // Returns true if |rtp_payload_type| is registered as RED. + // Returns true if `rtp_payload_type` is registered as RED. bool IsRed(uint8_t rtp_payload_type) const; - // Returns kOK if all packets in |packet_list| carry payload types that are + // Returns kOK if all packets in `packet_list` carry payload types that are // registered in the database. Otherwise, returns kDecoderNotFound. int CheckPayloadTypes(const PacketList& packet_list) const; @@ -208,8 +210,6 @@ class DecoderDatabase { mutable std::unique_ptr active_cng_decoder_; rtc::scoped_refptr decoder_factory_; const absl::optional codec_pair_id_; - - RTC_DISALLOW_COPY_AND_ASSIGN(DecoderDatabase); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/decoder_database_unittest.cc b/modules/audio_coding/neteq/decoder_database_unittest.cc index 33bee8d6f5..f28a0fd412 100644 --- a/modules/audio_coding/neteq/decoder_database_unittest.cc +++ b/modules/audio_coding/neteq/decoder_database_unittest.cc @@ -148,7 +148,7 @@ TEST(DecoderDatabase, CheckPayloadTypes) { } PacketList packet_list; for (int i = 0; i < kNumPayloads + 1; ++i) { - // Create packet with payload type |i|. The last packet will have a payload + // Create packet with payload type `i`. The last packet will have a payload // type that is not registered in the decoder database. Packet packet; packet.payload_type = i; diff --git a/modules/audio_coding/neteq/delay_manager.cc b/modules/audio_coding/neteq/delay_manager.cc index aec80cfa4b..9f6b269b03 100644 --- a/modules/audio_coding/neteq/delay_manager.cc +++ b/modules/audio_coding/neteq/delay_manager.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/delay_manager.h" -#include #include #include @@ -19,7 +18,6 @@ #include #include -#include "modules/audio_coding/neteq/histogram.h" #include "modules/include/module_common_types_public.h" #include "rtc_base/checks.h" #include "rtc_base/experiments/struct_parameters_parser.h" @@ -33,168 +31,92 @@ namespace { constexpr int kMinBaseMinimumDelayMs = 0; constexpr int kMaxBaseMinimumDelayMs = 10000; -constexpr int kDelayBuckets = 100; -constexpr int kBucketSizeMs = 20; constexpr int kStartDelayMs = 80; -constexpr int kMaxNumReorderedPackets = 5; - -struct DelayManagerConfig { - double quantile = 0.97; - double forget_factor = 0.9993; - absl::optional start_forget_weight = 2; - absl::optional resample_interval_ms; - int max_history_ms = 2000; - - std::unique_ptr Parser() { - return webrtc::StructParametersParser::Create( // - "quantile", &quantile, // - "forget_factor", &forget_factor, // - "start_forget_weight", &start_forget_weight, // - "resample_interval_ms", &resample_interval_ms, // - "max_history_ms", &max_history_ms); - } - - // TODO(jakobi): remove legacy field trial. 
- void MaybeUpdateFromLegacyFieldTrial() { - constexpr char kDelayHistogramFieldTrial[] = - "WebRTC-Audio-NetEqDelayHistogram"; - if (!webrtc::field_trial::IsEnabled(kDelayHistogramFieldTrial)) { - return; - } - const auto field_trial_string = - webrtc::field_trial::FindFullName(kDelayHistogramFieldTrial); - double percentile = -1.0; - double forget_factor = -1.0; - double start_forget_weight = -1.0; - if (sscanf(field_trial_string.c_str(), "Enabled-%lf-%lf-%lf", &percentile, - &forget_factor, &start_forget_weight) >= 2 && - percentile >= 0.0 && percentile <= 100.0 && forget_factor >= 0.0 && - forget_factor <= 1.0) { - this->quantile = percentile / 100; - this->forget_factor = forget_factor; - this->start_forget_weight = start_forget_weight >= 1 - ? absl::make_optional(start_forget_weight) - : absl::nullopt; - } - } - explicit DelayManagerConfig() { - Parser()->Parse(webrtc::field_trial::FindFullName( - "WebRTC-Audio-NetEqDelayManagerConfig")); - MaybeUpdateFromLegacyFieldTrial(); - RTC_LOG(LS_INFO) << "Delay manager config:" - " quantile=" - << quantile << " forget_factor=" << forget_factor - << " start_forget_weight=" - << start_forget_weight.value_or(0) - << " resample_interval_ms=" - << resample_interval_ms.value_or(0) - << " max_history_ms=" << max_history_ms; +std::unique_ptr MaybeCreateReorderOptimizer( + const DelayManager::Config& config) { + if (!config.use_reorder_optimizer) { + return nullptr; } -}; + return std::make_unique( + (1 << 15) * config.reorder_forget_factor, config.ms_per_loss_percent, + config.start_forget_weight); +} } // namespace -DelayManager::DelayManager(int max_packets_in_buffer, - int base_minimum_delay_ms, - int histogram_quantile, - absl::optional resample_interval_ms, - int max_history_ms, - const TickTimer* tick_timer, - std::unique_ptr histogram) - : first_packet_received_(false), - max_packets_in_buffer_(max_packets_in_buffer), - histogram_(std::move(histogram)), - histogram_quantile_(histogram_quantile), - tick_timer_(tick_timer), - resample_interval_ms_(resample_interval_ms), - max_history_ms_(max_history_ms), - base_minimum_delay_ms_(base_minimum_delay_ms), - effective_minimum_delay_ms_(base_minimum_delay_ms), +DelayManager::Config::Config() { + StructParametersParser::Create( // + "quantile", &quantile, // + "forget_factor", &forget_factor, // + "start_forget_weight", &start_forget_weight, // + "resample_interval_ms", &resample_interval_ms, // + "max_history_ms", &max_history_ms, // + "use_reorder_optimizer", &use_reorder_optimizer, // + "reorder_forget_factor", &reorder_forget_factor, // + "ms_per_loss_percent", &ms_per_loss_percent) + ->Parse(webrtc::field_trial::FindFullName( + "WebRTC-Audio-NetEqDelayManagerConfig")); +} + +void DelayManager::Config::Log() { + RTC_LOG(LS_INFO) << "Delay manager config:" + " quantile=" + << quantile << " forget_factor=" << forget_factor + << " start_forget_weight=" << start_forget_weight.value_or(0) + << " resample_interval_ms=" + << resample_interval_ms.value_or(0) + << " max_history_ms=" << max_history_ms + << " use_reorder_optimizer=" << use_reorder_optimizer + << " reorder_forget_factor=" << reorder_forget_factor + << " ms_per_loss_percent=" << ms_per_loss_percent; +} + +DelayManager::DelayManager(const Config& config, const TickTimer* tick_timer) + : max_packets_in_buffer_(config.max_packets_in_buffer), + underrun_optimizer_(tick_timer, + (1 << 30) * config.quantile, + (1 << 15) * config.forget_factor, + config.start_forget_weight, + config.resample_interval_ms), + 
reorder_optimizer_(MaybeCreateReorderOptimizer(config)), + relative_arrival_delay_tracker_(tick_timer, config.max_history_ms), + base_minimum_delay_ms_(config.base_minimum_delay_ms), + effective_minimum_delay_ms_(config.base_minimum_delay_ms), minimum_delay_ms_(0), maximum_delay_ms_(0), - target_level_ms_(kStartDelayMs), - last_timestamp_(0) { - RTC_CHECK(histogram_); + target_level_ms_(kStartDelayMs) { RTC_DCHECK_GE(base_minimum_delay_ms_, 0); Reset(); } -std::unique_ptr DelayManager::Create( - int max_packets_in_buffer, - int base_minimum_delay_ms, - const TickTimer* tick_timer) { - DelayManagerConfig config; - int forget_factor_q15 = (1 << 15) * config.forget_factor; - int quantile_q30 = (1 << 30) * config.quantile; - std::unique_ptr histogram = std::make_unique( - kDelayBuckets, forget_factor_q15, config.start_forget_weight); - return std::make_unique( - max_packets_in_buffer, base_minimum_delay_ms, quantile_q30, - config.resample_interval_ms, config.max_history_ms, tick_timer, - std::move(histogram)); -} - DelayManager::~DelayManager() {} absl::optional DelayManager::Update(uint32_t timestamp, int sample_rate_hz, bool reset) { - if (sample_rate_hz <= 0) { - return absl::nullopt; + if (reset) { + relative_arrival_delay_tracker_.Reset(); } - - if (!first_packet_received_ || reset) { - // Restart relative delay esimation from this packet. - delay_history_.clear(); - packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch(); - last_timestamp_ = timestamp; - first_packet_received_ = true; - num_reordered_packets_ = 0; - resample_stopwatch_ = tick_timer_->GetNewStopwatch(); - max_delay_in_interval_ms_ = 0; + absl::optional relative_delay = + relative_arrival_delay_tracker_.Update(timestamp, sample_rate_hz); + if (!relative_delay) { return absl::nullopt; } - const int expected_iat_ms = - 1000ll * static_cast(timestamp - last_timestamp_) / - sample_rate_hz; - const int iat_ms = packet_iat_stopwatch_->ElapsedMs(); - const int iat_delay_ms = iat_ms - expected_iat_ms; - int relative_delay; - bool reordered = !IsNewerTimestamp(timestamp, last_timestamp_); - if (reordered) { - relative_delay = std::max(iat_delay_ms, 0); - } else { - UpdateDelayHistory(iat_delay_ms, timestamp, sample_rate_hz); - relative_delay = CalculateRelativePacketArrivalDelay(); - } - - absl::optional histogram_update; - if (resample_interval_ms_) { - if (static_cast(resample_stopwatch_->ElapsedMs()) > - *resample_interval_ms_) { - histogram_update = max_delay_in_interval_ms_; - resample_stopwatch_ = tick_timer_->GetNewStopwatch(); - max_delay_in_interval_ms_ = 0; - } - max_delay_in_interval_ms_ = - std::max(max_delay_in_interval_ms_, relative_delay); - } else { - histogram_update = relative_delay; + bool reordered = + relative_arrival_delay_tracker_.newest_timestamp() != timestamp; + if (!reorder_optimizer_ || !reordered) { + underrun_optimizer_.Update(*relative_delay); } - if (histogram_update) { - const int index = *histogram_update / kBucketSizeMs; - if (index < histogram_->NumBuckets()) { - // Maximum delay to register is 2000 ms. - histogram_->Add(index); - } + target_level_ms_ = + underrun_optimizer_.GetOptimalDelayMs().value_or(kStartDelayMs); + if (reorder_optimizer_) { + reorder_optimizer_->Update(*relative_delay, reordered, target_level_ms_); + target_level_ms_ = std::max( + target_level_ms_, reorder_optimizer_->GetOptimalDelayMs().value_or(0)); } - - // Calculate new |target_level_ms_| based on updated statistics. 
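The constructor arguments above convert the plain Config ratios into the fixed-point formats the optimizers expect: the quantile in Q30 and forget factors in Q15. Two hypothetical helpers make the arithmetic explicit (the production code multiplies directly, without the rounding term):

// Q30: 0.95 * 2^30 ~= 1020054733, the value the old unit tests hard-coded
// as kDefaultHistogramQuantile. Q15: 0.9993 * 2^15 ~= 32745, the old
// kForgetFactor.
constexpr int ToQ30(double x) { return static_cast<int>(x * (1 << 30) + 0.5); }
constexpr int ToQ15(double x) { return static_cast<int>(x * (1 << 15) + 0.5); }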
- int bucket_index = histogram_->Quantile(histogram_quantile_); - target_level_ms_ = (1 + bucket_index) * kBucketSizeMs; target_level_ms_ = std::max(target_level_ms_, effective_minimum_delay_ms_); if (maximum_delay_ms_ > 0) { target_level_ms_ = std::min(target_level_ms_, maximum_delay_ms_); @@ -207,47 +129,9 @@ absl::optional DelayManager::Update(uint32_t timestamp, target_level_ms_, 3 * max_packets_in_buffer_ * packet_len_ms_ / 4); } - // Prepare for next packet arrival. - if (reordered) { - // Allow a small number of reordered packets before resetting the delay - // estimation. - if (num_reordered_packets_ < kMaxNumReorderedPackets) { - ++num_reordered_packets_; - return relative_delay; - } - delay_history_.clear(); - } - num_reordered_packets_ = 0; - packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch(); - last_timestamp_ = timestamp; return relative_delay; } -void DelayManager::UpdateDelayHistory(int iat_delay_ms, - uint32_t timestamp, - int sample_rate_hz) { - PacketDelay delay; - delay.iat_delay_ms = iat_delay_ms; - delay.timestamp = timestamp; - delay_history_.push_back(delay); - while (timestamp - delay_history_.front().timestamp > - static_cast(max_history_ms_ * sample_rate_hz / 1000)) { - delay_history_.pop_front(); - } -} - -int DelayManager::CalculateRelativePacketArrivalDelay() const { - // This effectively calculates arrival delay of a packet relative to the - // packet preceding the history window. If the arrival delay ever becomes - // smaller than zero, it means the reference packet is invalid, and we - // move the reference. - int relative_delay = 0; - for (const PacketDelay& delay : delay_history_) { - relative_delay += delay.iat_delay_ms; - relative_delay = std::max(relative_delay, 0); - } - return relative_delay; -} int DelayManager::SetPacketAudioLength(int length_ms) { if (length_ms <= 0) { @@ -260,14 +144,12 @@ int DelayManager::SetPacketAudioLength(int length_ms) { void DelayManager::Reset() { packet_len_ms_ = 0; - histogram_->Reset(); - delay_history_.clear(); + underrun_optimizer_.Reset(); + relative_arrival_delay_tracker_.Reset(); target_level_ms_ = kStartDelayMs; - packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch(); - first_packet_received_ = false; - num_reordered_packets_ = 0; - resample_stopwatch_ = tick_timer_->GetNewStopwatch(); - max_delay_in_interval_ms_ = 0; + if (reorder_optimizer_) { + reorder_optimizer_->Reset(); + } } int DelayManager::TargetDelayMs() const { @@ -294,7 +176,7 @@ bool DelayManager::SetMinimumDelay(int delay_ms) { } bool DelayManager::SetMaximumDelay(int delay_ms) { - // If |delay_ms| is zero then it unsets the maximum delay and target level is + // If `delay_ms` is zero then it unsets the maximum delay and target level is // unconstrained by maximum delay. if (delay_ms != 0 && (delay_ms < minimum_delay_ms_ || delay_ms < packet_len_ms_)) { @@ -322,7 +204,7 @@ int DelayManager::GetBaseMinimumDelay() const { } void DelayManager::UpdateEffectiveMinimumDelay() { - // Clamp |base_minimum_delay_ms_| into the range which can be effectively + // Clamp `base_minimum_delay_ms_` into the range which can be effectively // used. 
const int base_minimum_delay_ms = rtc::SafeClamp(base_minimum_delay_ms_, 0, MinimumDelayUpperBound()); diff --git a/modules/audio_coding/neteq/delay_manager.h b/modules/audio_coding/neteq/delay_manager.h index 9832ceda22..56d108ad11 100644 --- a/modules/audio_coding/neteq/delay_manager.h +++ b/modules/audio_coding/neteq/delay_manager.h @@ -19,35 +19,45 @@ #include "absl/types/optional.h" #include "api/neteq/tick_timer.h" #include "modules/audio_coding/neteq/histogram.h" -#include "rtc_base/constructor_magic.h" +#include "modules/audio_coding/neteq/relative_arrival_delay_tracker.h" +#include "modules/audio_coding/neteq/reorder_optimizer.h" +#include "modules/audio_coding/neteq/underrun_optimizer.h" namespace webrtc { class DelayManager { public: - DelayManager(int max_packets_in_buffer, - int base_minimum_delay_ms, - int histogram_quantile, - absl::optional resample_interval_ms, - int max_history_ms, - const TickTimer* tick_timer, - std::unique_ptr histogram); - - // Create a DelayManager object. Notify the delay manager that the packet - // buffer can hold no more than |max_packets_in_buffer| packets (i.e., this - // is the number of packet slots in the buffer) and that the target delay - // should be greater than or equal to |base_minimum_delay_ms|. Supply a - // PeakDetector object to the DelayManager. - static std::unique_ptr Create(int max_packets_in_buffer, - int base_minimum_delay_ms, - const TickTimer* tick_timer); + struct Config { + Config(); + void Log(); + + // Options that can be configured via field trial. + double quantile = 0.95; + double forget_factor = 0.983; + absl::optional start_forget_weight = 2; + absl::optional resample_interval_ms = 500; + int max_history_ms = 2000; + + bool use_reorder_optimizer = true; + double reorder_forget_factor = 0.9993; + int ms_per_loss_percent = 20; + + // Options that are externally populated. + int max_packets_in_buffer = 200; + int base_minimum_delay_ms = 0; + }; + + DelayManager(const Config& config, const TickTimer* tick_timer); virtual ~DelayManager(); - // Updates the delay manager with a new incoming packet, with |timestamp| from + DelayManager(const DelayManager&) = delete; + DelayManager& operator=(const DelayManager&) = delete; + + // Updates the delay manager with a new incoming packet, with `timestamp` from // the RTP header. This updates the statistics and a new target buffer level // is calculated. Returns the relative delay if it can be calculated. If - // |reset| is true, restarts the relative arrival delay calculation from this + // `reset` is true, restarts the relative arrival delay calculation from this // packet. virtual absl::optional Update(uint32_t timestamp, int sample_rate_hz, @@ -63,7 +73,7 @@ class DelayManager { virtual int SetPacketAudioLength(int length_ms); // Accessors and mutators. - // Assuming |delay| is in valid range. + // Assuming `delay` is in valid range. virtual bool SetMinimumDelay(int delay_ms); virtual bool SetMaximumDelay(int delay_ms); virtual bool SetBaseMinimumDelay(int delay_ms); @@ -73,42 +83,29 @@ class DelayManager { int effective_minimum_delay_ms_for_test() const { return effective_minimum_delay_ms_; } - int histogram_quantile() const { return histogram_quantile_; } - Histogram* histogram() const { return histogram_.get(); } private: // Provides value which minimum delay can't exceed based on current buffer - // size and given |maximum_delay_ms_|. Lower bound is a constant 0. + // size and given `maximum_delay_ms_`. Lower bound is a constant 0. 
int MinimumDelayUpperBound() const; - // Updates |delay_history_|. - void UpdateDelayHistory(int iat_delay_ms, - uint32_t timestamp, - int sample_rate_hz); - - // Calculate relative packet arrival delay from |delay_history_|. - int CalculateRelativePacketArrivalDelay() const; - - // Updates |effective_minimum_delay_ms_| delay based on current - // |minimum_delay_ms_|, |base_minimum_delay_ms_| and |maximum_delay_ms_| + // Updates `effective_minimum_delay_ms_` delay based on current + // `minimum_delay_ms_`, `base_minimum_delay_ms_` and `maximum_delay_ms_` // and buffer size. void UpdateEffectiveMinimumDelay(); - // Makes sure that |delay_ms| is less than maximum delay, if any maximum - // is set. Also, if possible check |delay_ms| to be less than 75% of - // |max_packets_in_buffer_|. + // Makes sure that `delay_ms` is less than maximum delay, if any maximum + // is set. Also, if possible check `delay_ms` to be less than 75% of + // `max_packets_in_buffer_`. bool IsValidMinimumDelay(int delay_ms) const; bool IsValidBaseMinimumDelay(int delay_ms) const; - bool first_packet_received_; // TODO(jakobi): set maximum buffer delay instead of number of packets. const int max_packets_in_buffer_; - std::unique_ptr histogram_; - const int histogram_quantile_; - const TickTimer* tick_timer_; - const absl::optional resample_interval_ms_; - const int max_history_ms_; + UnderrunOptimizer underrun_optimizer_; + std::unique_ptr reorder_optimizer_; + RelativeArrivalDelayTracker relative_arrival_delay_tracker_; int base_minimum_delay_ms_; int effective_minimum_delay_ms_; // Used as lower bound for target delay. @@ -116,21 +113,7 @@ class DelayManager { int maximum_delay_ms_; // Externally set maximum allowed delay. int packet_len_ms_ = 0; - std::unique_ptr - packet_iat_stopwatch_; // Time elapsed since last packet. - int target_level_ms_; // Currently preferred buffer level. - uint32_t last_timestamp_; // Timestamp for the last received packet. - int num_reordered_packets_ = 0; - int max_delay_in_interval_ms_ = 0; - std::unique_ptr resample_stopwatch_; - - struct PacketDelay { - int iat_delay_ms; - uint32_t timestamp; - }; - std::deque delay_history_; - - RTC_DISALLOW_COPY_AND_ASSIGN(DelayManager); + int target_level_ms_; // Currently preferred buffer level. 
}; } // namespace webrtc diff --git a/modules/audio_coding/neteq/delay_manager_unittest.cc b/modules/audio_coding/neteq/delay_manager_unittest.cc index 9678151dfa..ee353065ea 100644 --- a/modules/audio_coding/neteq/delay_manager_unittest.cc +++ b/modules/audio_coding/neteq/delay_manager_unittest.cc @@ -28,60 +28,36 @@ namespace webrtc { namespace { -constexpr int kMaxNumberOfPackets = 240; -constexpr int kMinDelayMs = 0; -constexpr int kMaxHistoryMs = 2000; +constexpr int kMaxNumberOfPackets = 200; constexpr int kTimeStepMs = 10; constexpr int kFs = 8000; constexpr int kFrameSizeMs = 20; constexpr int kTsIncrement = kFrameSizeMs * kFs / 1000; constexpr int kMaxBufferSizeMs = kMaxNumberOfPackets * kFrameSizeMs; -constexpr int kDefaultHistogramQuantile = 1020054733; -constexpr int kNumBuckets = 100; -constexpr int kForgetFactor = 32745; + } // namespace class DelayManagerTest : public ::testing::Test { protected: DelayManagerTest(); virtual void SetUp(); - void RecreateDelayManager(); absl::optional InsertNextPacket(); void IncreaseTime(int inc_ms); - std::unique_ptr dm_; TickTimer tick_timer_; - MockStatisticsCalculator stats_; - MockHistogram* mock_histogram_; + DelayManager dm_; uint32_t ts_; - bool use_mock_histogram_ = false; - absl::optional resample_interval_ms_; }; DelayManagerTest::DelayManagerTest() - : dm_(nullptr), - ts_(0x12345678) {} + : dm_(DelayManager::Config(), &tick_timer_), ts_(0x12345678) {} void DelayManagerTest::SetUp() { - RecreateDelayManager(); -} - -void DelayManagerTest::RecreateDelayManager() { - if (use_mock_histogram_) { - mock_histogram_ = new MockHistogram(kNumBuckets, kForgetFactor); - std::unique_ptr histogram(mock_histogram_); - dm_ = std::make_unique(kMaxNumberOfPackets, kMinDelayMs, - kDefaultHistogramQuantile, - resample_interval_ms_, kMaxHistoryMs, - &tick_timer_, std::move(histogram)); - } else { - dm_ = DelayManager::Create(kMaxNumberOfPackets, kMinDelayMs, &tick_timer_); - } - dm_->SetPacketAudioLength(kFrameSizeMs); + dm_.SetPacketAudioLength(kFrameSizeMs); } absl::optional DelayManagerTest::InsertNextPacket() { - auto relative_delay = dm_->Update(ts_, kFs); + auto relative_delay = dm_.Update(ts_, kFs); ts_ += kTsIncrement; return relative_delay; } @@ -98,72 +74,43 @@ TEST_F(DelayManagerTest, CreateAndDestroy) { } TEST_F(DelayManagerTest, UpdateNormal) { - // First packet arrival. - InsertNextPacket(); - // Advance time by one frame size. - IncreaseTime(kFrameSizeMs); - // Second packet arrival. - InsertNextPacket(); - EXPECT_EQ(20, dm_->TargetDelayMs()); -} - -TEST_F(DelayManagerTest, UpdateLongInterArrivalTime) { - // First packet arrival. - InsertNextPacket(); - // Advance time by two frame size. - IncreaseTime(2 * kFrameSizeMs); - // Second packet arrival. - InsertNextPacket(); - EXPECT_EQ(40, dm_->TargetDelayMs()); + for (int i = 0; i < 50; ++i) { + InsertNextPacket(); + IncreaseTime(kFrameSizeMs); + } + EXPECT_EQ(20, dm_.TargetDelayMs()); } TEST_F(DelayManagerTest, MaxDelay) { - const int kExpectedTarget = 5 * kFrameSizeMs; - // First packet arrival. InsertNextPacket(); - // Second packet arrival. - IncreaseTime(kExpectedTarget); + const int kMaxDelayMs = 60; + EXPECT_GT(dm_.TargetDelayMs(), kMaxDelayMs); + EXPECT_TRUE(dm_.SetMaximumDelay(kMaxDelayMs)); InsertNextPacket(); - - // No limit is set. 
- EXPECT_EQ(kExpectedTarget, dm_->TargetDelayMs()); - - const int kMaxDelayMs = 3 * kFrameSizeMs; - EXPECT_TRUE(dm_->SetMaximumDelay(kMaxDelayMs)); - IncreaseTime(kFrameSizeMs); - InsertNextPacket(); - EXPECT_EQ(kMaxDelayMs, dm_->TargetDelayMs()); + EXPECT_EQ(kMaxDelayMs, dm_.TargetDelayMs()); // Target level at least should be one packet. - EXPECT_FALSE(dm_->SetMaximumDelay(kFrameSizeMs - 1)); + EXPECT_FALSE(dm_.SetMaximumDelay(kFrameSizeMs - 1)); } TEST_F(DelayManagerTest, MinDelay) { - const int kExpectedTarget = 5 * kFrameSizeMs; - // First packet arrival. InsertNextPacket(); - // Second packet arrival. - IncreaseTime(kExpectedTarget); - InsertNextPacket(); - - // No limit is applied. - EXPECT_EQ(kExpectedTarget, dm_->TargetDelayMs()); - int kMinDelayMs = 7 * kFrameSizeMs; - dm_->SetMinimumDelay(kMinDelayMs); + EXPECT_LT(dm_.TargetDelayMs(), kMinDelayMs); + dm_.SetMinimumDelay(kMinDelayMs); IncreaseTime(kFrameSizeMs); InsertNextPacket(); - EXPECT_EQ(kMinDelayMs, dm_->TargetDelayMs()); + EXPECT_EQ(kMinDelayMs, dm_.TargetDelayMs()); } TEST_F(DelayManagerTest, BaseMinimumDelayCheckValidRange) { // Base minimum delay should be between [0, 10000] milliseconds. - EXPECT_FALSE(dm_->SetBaseMinimumDelay(-1)); - EXPECT_FALSE(dm_->SetBaseMinimumDelay(10001)); - EXPECT_EQ(dm_->GetBaseMinimumDelay(), 0); + EXPECT_FALSE(dm_.SetBaseMinimumDelay(-1)); + EXPECT_FALSE(dm_.SetBaseMinimumDelay(10001)); + EXPECT_EQ(dm_.GetBaseMinimumDelay(), 0); - EXPECT_TRUE(dm_->SetBaseMinimumDelay(7999)); - EXPECT_EQ(dm_->GetBaseMinimumDelay(), 7999); + EXPECT_TRUE(dm_.SetBaseMinimumDelay(7999)); + EXPECT_EQ(dm_.GetBaseMinimumDelay(), 7999); } TEST_F(DelayManagerTest, BaseMinimumDelayLowerThanMinimumDelay) { @@ -174,9 +121,9 @@ TEST_F(DelayManagerTest, BaseMinimumDelayLowerThanMinimumDelay) { // minimum delay is lower than minimum delay we use minimum delay. RTC_DCHECK_LT(kBaseMinimumDelayMs, kMinimumDelayMs); - EXPECT_TRUE(dm_->SetBaseMinimumDelay(kBaseMinimumDelayMs)); - EXPECT_TRUE(dm_->SetMinimumDelay(kMinimumDelayMs)); - EXPECT_EQ(dm_->effective_minimum_delay_ms_for_test(), kMinimumDelayMs); + EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs)); + EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs)); + EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kMinimumDelayMs); } TEST_F(DelayManagerTest, BaseMinimumDelayGreaterThanMinimumDelay) { @@ -187,9 +134,9 @@ TEST_F(DelayManagerTest, BaseMinimumDelayGreaterThanMinimumDelay) { // minimum delay is greater than minimum delay we use base minimum delay. RTC_DCHECK_GT(kBaseMinimumDelayMs, kMinimumDelayMs); - EXPECT_TRUE(dm_->SetBaseMinimumDelay(kBaseMinimumDelayMs)); - EXPECT_TRUE(dm_->SetMinimumDelay(kMinimumDelayMs)); - EXPECT_EQ(dm_->effective_minimum_delay_ms_for_test(), kBaseMinimumDelayMs); + EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs)); + EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs)); + EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kBaseMinimumDelayMs); } TEST_F(DelayManagerTest, BaseMinimumDelayGreaterThanBufferSize) { @@ -198,7 +145,7 @@ TEST_F(DelayManagerTest, BaseMinimumDelayGreaterThanBufferSize) { constexpr int kMaximumDelayMs = 20; constexpr int kMaxBufferSizeMsQ75 = 3 * kMaxBufferSizeMs / 4; - EXPECT_TRUE(dm_->SetMaximumDelay(kMaximumDelayMs)); + EXPECT_TRUE(dm_.SetMaximumDelay(kMaximumDelayMs)); // Base minimum delay is greater than minimum delay, that is why we clamp // it to current the highest possible value which is maximum delay. 
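Taken together, the clamping behavior these tests pin down is compact enough to state in code. The helper below is an illustrative sketch only, a hypothetical function rather than the DelayManager implementation: the effective minimum delay is the larger of the minimum and base minimum delays, capped by the maximum delay when one is set, and by 75% of the buffer capacity otherwise.

```c++
#include <algorithm>

// Hypothetical sketch of the rule the surrounding tests assert on; not the
// actual DelayManager code. maximum_delay_ms == 0 means "unset".
int EffectiveMinimumDelaySketch(int minimum_delay_ms,
                                int base_minimum_delay_ms,
                                int maximum_delay_ms,
                                int max_buffer_size_ms) {
  // Upper bound: the explicit maximum if set, otherwise 75% of the buffer.
  const int upper_bound_ms =
      maximum_delay_ms > 0 ? maximum_delay_ms : 3 * max_buffer_size_ms / 4;
  return std::min(std::max(minimum_delay_ms, base_minimum_delay_ms),
                  upper_bound_ms);
}
```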
@@ -207,15 +154,15 @@ TEST_F(DelayManagerTest, BaseMinimumDelayGreaterThanBufferSize) { RTC_DCHECK_GT(kBaseMinimumDelayMs, kMaximumDelayMs); RTC_DCHECK_LT(kMaximumDelayMs, kMaxBufferSizeMsQ75); - EXPECT_TRUE(dm_->SetMinimumDelay(kMinimumDelayMs)); - EXPECT_TRUE(dm_->SetBaseMinimumDelay(kBaseMinimumDelayMs)); + EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs)); + EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs)); // Unset maximum value. - EXPECT_TRUE(dm_->SetMaximumDelay(0)); + EXPECT_TRUE(dm_.SetMaximumDelay(0)); // With maximum value unset, the highest possible value now is 75% of // currently possible maximum buffer size. - EXPECT_EQ(dm_->effective_minimum_delay_ms_for_test(), kMaxBufferSizeMsQ75); + EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kMaxBufferSizeMsQ75); } TEST_F(DelayManagerTest, BaseMinimumDelayGreaterThanMaximumDelay) { @@ -229,10 +176,10 @@ TEST_F(DelayManagerTest, BaseMinimumDelayGreaterThanMaximumDelay) { RTC_DCHECK_GT(kBaseMinimumDelayMs, kMaximumDelayMs); RTC_DCHECK_LT(kMaximumDelayMs, kMaxBufferSizeMs); - EXPECT_TRUE(dm_->SetMaximumDelay(kMaximumDelayMs)); - EXPECT_TRUE(dm_->SetMinimumDelay(kMinimumDelayMs)); - EXPECT_TRUE(dm_->SetBaseMinimumDelay(kBaseMinimumDelayMs)); - EXPECT_EQ(dm_->effective_minimum_delay_ms_for_test(), kMaximumDelayMs); + EXPECT_TRUE(dm_.SetMaximumDelay(kMaximumDelayMs)); + EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs)); + EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs)); + EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kMaximumDelayMs); } TEST_F(DelayManagerTest, BaseMinimumDelayLowerThanMaxSize) { @@ -245,10 +192,10 @@ TEST_F(DelayManagerTest, BaseMinimumDelayLowerThanMaxSize) { RTC_DCHECK_GT(kBaseMinimumDelayMs, kMinimumDelayMs); RTC_DCHECK_LT(kBaseMinimumDelayMs, kMaximumDelayMs); - EXPECT_TRUE(dm_->SetMaximumDelay(kMaximumDelayMs)); - EXPECT_TRUE(dm_->SetMinimumDelay(kMinimumDelayMs)); - EXPECT_TRUE(dm_->SetBaseMinimumDelay(kBaseMinimumDelayMs)); - EXPECT_EQ(dm_->effective_minimum_delay_ms_for_test(), kBaseMinimumDelayMs); + EXPECT_TRUE(dm_.SetMaximumDelay(kMaximumDelayMs)); + EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs)); + EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs)); + EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kBaseMinimumDelayMs); } TEST_F(DelayManagerTest, MinimumDelayMemorization) { @@ -260,194 +207,50 @@ TEST_F(DelayManagerTest, MinimumDelayMemorization) { constexpr int kMinimumDelayMs = 20; constexpr int kBaseMinimumDelayMsHigh = 30; - EXPECT_TRUE(dm_->SetBaseMinimumDelay(kBaseMinimumDelayMsLow)); - EXPECT_TRUE(dm_->SetMinimumDelay(kMinimumDelayMs)); + EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMsLow)); + EXPECT_TRUE(dm_.SetMinimumDelay(kMinimumDelayMs)); // Minimum delay is used as it is higher than base minimum delay. - EXPECT_EQ(dm_->effective_minimum_delay_ms_for_test(), kMinimumDelayMs); + EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kMinimumDelayMs); - EXPECT_TRUE(dm_->SetBaseMinimumDelay(kBaseMinimumDelayMsHigh)); + EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMsHigh)); // Base minimum delay is used as it is now higher than minimum delay. - EXPECT_EQ(dm_->effective_minimum_delay_ms_for_test(), - kBaseMinimumDelayMsHigh); + EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kBaseMinimumDelayMsHigh); - EXPECT_TRUE(dm_->SetBaseMinimumDelay(kBaseMinimumDelayMsLow)); + EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMsLow)); // Check that minimum delay is memorized and is used again. 
- EXPECT_EQ(dm_->effective_minimum_delay_ms_for_test(), kMinimumDelayMs); + EXPECT_EQ(dm_.effective_minimum_delay_ms_for_test(), kMinimumDelayMs); } TEST_F(DelayManagerTest, BaseMinimumDelay) { - const int kExpectedTarget = 5 * kFrameSizeMs; // First packet arrival. InsertNextPacket(); - // Second packet arrival. - IncreaseTime(kExpectedTarget); - InsertNextPacket(); - - // No limit is applied. - EXPECT_EQ(kExpectedTarget, dm_->TargetDelayMs()); constexpr int kBaseMinimumDelayMs = 7 * kFrameSizeMs; - EXPECT_TRUE(dm_->SetBaseMinimumDelay(kBaseMinimumDelayMs)); - EXPECT_EQ(dm_->GetBaseMinimumDelay(), kBaseMinimumDelayMs); + EXPECT_LT(dm_.TargetDelayMs(), kBaseMinimumDelayMs); + EXPECT_TRUE(dm_.SetBaseMinimumDelay(kBaseMinimumDelayMs)); + EXPECT_EQ(dm_.GetBaseMinimumDelay(), kBaseMinimumDelayMs); IncreaseTime(kFrameSizeMs); InsertNextPacket(); - EXPECT_EQ(dm_->GetBaseMinimumDelay(), kBaseMinimumDelayMs); - EXPECT_EQ(kBaseMinimumDelayMs, dm_->TargetDelayMs()); -} - -TEST_F(DelayManagerTest, BaseMinimumDelayAffectsTargetDelay) { - const int kExpectedTarget = 5; - const int kTimeIncrement = kExpectedTarget * kFrameSizeMs; - // First packet arrival. - InsertNextPacket(); - // Second packet arrival. - IncreaseTime(kTimeIncrement); - InsertNextPacket(); - - // No limit is applied. - EXPECT_EQ(kTimeIncrement, dm_->TargetDelayMs()); - - // Minimum delay is lower than base minimum delay, that is why base minimum - // delay is used to calculate target level. - constexpr int kMinimumDelayPackets = kExpectedTarget + 1; - constexpr int kBaseMinimumDelayPackets = kExpectedTarget + 2; - - constexpr int kMinimumDelayMs = kMinimumDelayPackets * kFrameSizeMs; - constexpr int kBaseMinimumDelayMs = kBaseMinimumDelayPackets * kFrameSizeMs; - - EXPECT_TRUE(kMinimumDelayMs < kBaseMinimumDelayMs); - EXPECT_TRUE(dm_->SetMinimumDelay(kMinimumDelayMs)); - EXPECT_TRUE(dm_->SetBaseMinimumDelay(kBaseMinimumDelayMs)); - EXPECT_EQ(dm_->GetBaseMinimumDelay(), kBaseMinimumDelayMs); - - IncreaseTime(kFrameSizeMs); - InsertNextPacket(); - EXPECT_EQ(dm_->GetBaseMinimumDelay(), kBaseMinimumDelayMs); - EXPECT_EQ(kBaseMinimumDelayMs, dm_->TargetDelayMs()); + EXPECT_EQ(dm_.GetBaseMinimumDelay(), kBaseMinimumDelayMs); + EXPECT_EQ(kBaseMinimumDelayMs, dm_.TargetDelayMs()); } TEST_F(DelayManagerTest, Failures) { // Wrong sample rate. - EXPECT_EQ(absl::nullopt, dm_->Update(0, -1)); + EXPECT_EQ(absl::nullopt, dm_.Update(0, -1)); // Wrong packet size. - EXPECT_EQ(-1, dm_->SetPacketAudioLength(0)); - EXPECT_EQ(-1, dm_->SetPacketAudioLength(-1)); + EXPECT_EQ(-1, dm_.SetPacketAudioLength(0)); + EXPECT_EQ(-1, dm_.SetPacketAudioLength(-1)); // Minimum delay higher than a maximum delay is not accepted. - EXPECT_TRUE(dm_->SetMaximumDelay(20)); - EXPECT_FALSE(dm_->SetMinimumDelay(40)); + EXPECT_TRUE(dm_.SetMaximumDelay(20)); + EXPECT_FALSE(dm_.SetMinimumDelay(40)); // Maximum delay less than minimum delay is not accepted. - EXPECT_TRUE(dm_->SetMaximumDelay(100)); - EXPECT_TRUE(dm_->SetMinimumDelay(80)); - EXPECT_FALSE(dm_->SetMaximumDelay(60)); -} - -TEST_F(DelayManagerTest, DelayHistogramFieldTrial) { - { - test::ScopedFieldTrials field_trial( - "WebRTC-Audio-NetEqDelayHistogram/Enabled-96-0.998/"); - RecreateDelayManager(); - EXPECT_EQ(1030792151, dm_->histogram_quantile()); // 0.96 in Q30. - EXPECT_EQ( - 32702, - dm_->histogram()->base_forget_factor_for_testing()); // 0.998 in Q15. 
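  // Q-format reference for the constants in these field-trial tests: a value
  // x in Qn is stored as round(x * 2^n). Hence 0.998 * 2^15 = 32702 (Q15),
  // 0.96 * 2^30 = 1030792151 and 0.975 * 2^30 = 1046898278 (Q30); the removed
  // kDefaultHistogramQuantile, 1020054733, corresponds to 0.95 in Q30.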
- EXPECT_FALSE(dm_->histogram()->start_forget_weight_for_testing()); - } - { - test::ScopedFieldTrials field_trial( - "WebRTC-Audio-NetEqDelayHistogram/Enabled-97.5-0.998/"); - RecreateDelayManager(); - EXPECT_EQ(1046898278, dm_->histogram_quantile()); // 0.975 in Q30. - EXPECT_EQ( - 32702, - dm_->histogram()->base_forget_factor_for_testing()); // 0.998 in Q15. - EXPECT_FALSE(dm_->histogram()->start_forget_weight_for_testing()); - } - // Test parameter for new call start adaptation. - { - test::ScopedFieldTrials field_trial( - "WebRTC-Audio-NetEqDelayHistogram/Enabled-96-0.998-1/"); - RecreateDelayManager(); - EXPECT_EQ(dm_->histogram()->start_forget_weight_for_testing().value(), 1.0); - } - { - test::ScopedFieldTrials field_trial( - "WebRTC-Audio-NetEqDelayHistogram/Enabled-96-0.998-1.5/"); - RecreateDelayManager(); - EXPECT_EQ(dm_->histogram()->start_forget_weight_for_testing().value(), 1.5); - } - { - test::ScopedFieldTrials field_trial( - "WebRTC-Audio-NetEqDelayHistogram/Enabled-96-0.998-0.5/"); - RecreateDelayManager(); - EXPECT_FALSE(dm_->histogram()->start_forget_weight_for_testing()); - } -} - -TEST_F(DelayManagerTest, RelativeArrivalDelay) { - use_mock_histogram_ = true; - RecreateDelayManager(); - - InsertNextPacket(); - - IncreaseTime(kFrameSizeMs); - EXPECT_CALL(*mock_histogram_, Add(0)); // Not delayed. - InsertNextPacket(); - - IncreaseTime(2 * kFrameSizeMs); - EXPECT_CALL(*mock_histogram_, Add(1)); // 20ms delayed. - dm_->Update(ts_, kFs); - - IncreaseTime(2 * kFrameSizeMs); - EXPECT_CALL(*mock_histogram_, Add(2)); // 40ms delayed. - dm_->Update(ts_ + kTsIncrement, kFs); - - EXPECT_CALL(*mock_histogram_, Add(1)); // Reordered, 20ms delayed. - dm_->Update(ts_, kFs); -} - -TEST_F(DelayManagerTest, ReorderedPackets) { - use_mock_histogram_ = true; - RecreateDelayManager(); - - // Insert first packet. - InsertNextPacket(); - - // Insert reordered packet. - EXPECT_CALL(*mock_histogram_, Add(4)); - dm_->Update(ts_ - 5 * kTsIncrement, kFs); - - // Insert another reordered packet. - EXPECT_CALL(*mock_histogram_, Add(1)); - dm_->Update(ts_ - 2 * kTsIncrement, kFs); - - // Insert the next packet in order and verify that the relative delay is - // estimated based on the first inserted packet. - IncreaseTime(4 * kFrameSizeMs); - EXPECT_CALL(*mock_histogram_, Add(3)); - InsertNextPacket(); -} - -TEST_F(DelayManagerTest, MaxDelayHistory) { - use_mock_histogram_ = true; - RecreateDelayManager(); - - InsertNextPacket(); - - // Insert 20 ms iat delay in the delay history. - IncreaseTime(2 * kFrameSizeMs); - EXPECT_CALL(*mock_histogram_, Add(1)); // 20ms delayed. - InsertNextPacket(); - - // Insert next packet with a timestamp difference larger than maximum history - // size. This removes the previously inserted iat delay from the history. - constexpr int kMaxHistoryMs = 2000; - IncreaseTime(kMaxHistoryMs + kFrameSizeMs); - ts_ += kFs * kMaxHistoryMs / 1000; - EXPECT_CALL(*mock_histogram_, Add(0)); // Not delayed. - dm_->Update(ts_, kFs); + EXPECT_TRUE(dm_.SetMaximumDelay(100)); + EXPECT_TRUE(dm_.SetMinimumDelay(80)); + EXPECT_FALSE(dm_.SetMaximumDelay(60)); } TEST_F(DelayManagerTest, RelativeArrivalDelayStatistic) { @@ -459,31 +262,4 @@ TEST_F(DelayManagerTest, RelativeArrivalDelayStatistic) { EXPECT_EQ(20, InsertNextPacket()); } -TEST_F(DelayManagerTest, ResamplePacketDelays) { - use_mock_histogram_ = true; - resample_interval_ms_ = 500; - RecreateDelayManager(); - - // The histogram should be updated once with the maximum delay observed for - // the following sequence of packets. 
- EXPECT_CALL(*mock_histogram_, Add(5)).Times(1); - - EXPECT_EQ(absl::nullopt, InsertNextPacket()); - - IncreaseTime(kFrameSizeMs); - EXPECT_EQ(0, InsertNextPacket()); - IncreaseTime(3 * kFrameSizeMs); - EXPECT_EQ(2 * kFrameSizeMs, InsertNextPacket()); - IncreaseTime(4 * kFrameSizeMs); - EXPECT_EQ(5 * kFrameSizeMs, InsertNextPacket()); - - for (int i = 4; i >= 0; --i) { - EXPECT_EQ(i * kFrameSizeMs, InsertNextPacket()); - } - for (int i = 0; i < *resample_interval_ms_ / kFrameSizeMs; ++i) { - IncreaseTime(kFrameSizeMs); - EXPECT_EQ(0, InsertNextPacket()); - } -} - } // namespace webrtc diff --git a/modules/audio_coding/neteq/dsp_helper.cc b/modules/audio_coding/neteq/dsp_helper.cc index 91979f2d48..a979f94214 100644 --- a/modules/audio_coding/neteq/dsp_helper.cc +++ b/modules/audio_coding/neteq/dsp_helper.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/dsp_helper.h" -#include #include // Access to memset. #include // Access to min, max. @@ -95,7 +94,7 @@ int DspHelper::RampSignal(AudioMultiVector* signal, return factor; } int end_factor = 0; - // Loop over the channels, starting at the same |factor| each time. + // Loop over the channels, starting at the same `factor` each time. for (size_t channel = 0; channel < signal->Channels(); ++channel) { end_factor = RampSignal(&(*signal)[channel], start_index, length, factor, increment); @@ -117,7 +116,7 @@ void DspHelper::PeakDetection(int16_t* data, // Single peak. The parabola fit assumes that an extra point is // available; worst case it gets a zero on the high end of the signal. // TODO(hlundin): This can potentially get much worse. It breaks the - // API contract, that the length of |data| is |data_length|. + // API contract, that the length of `data` is `data_length`. data_length++; } @@ -355,7 +354,7 @@ int DspHelper::DownsampleTo4kHz(const int16_t* input, break; } default: { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return -1; } } diff --git a/modules/audio_coding/neteq/dsp_helper.h b/modules/audio_coding/neteq/dsp_helper.h index 82fe14e6ea..4aead7df18 100644 --- a/modules/audio_coding/neteq/dsp_helper.h +++ b/modules/audio_coding/neteq/dsp_helper.h @@ -16,7 +16,6 @@ #include "modules/audio_coding/neteq/audio_multi_vector.h" #include "modules/audio_coding/neteq/audio_vector.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -51,8 +50,8 @@ class DspHelper { static const int kUnmuteFactorIncrement48kHz = 1057; // Multiplies the signal with a gradually changing factor. - // The first sample is multiplied with |factor| (in Q14). For each sample, - // |factor| is increased (additive) by the |increment| (in Q20), which can + // The first sample is multiplied with `factor` (in Q14). For each sample, + // `factor` is increased (additive) by the `increment` (in Q20), which can // be negative. Returns the scale factor after the last increment. static int RampSignal(const int16_t* input, size_t length, @@ -60,14 +59,14 @@ class DspHelper { int increment, int16_t* output); - // Same as above, but with the samples of |signal| being modified in-place. + // Same as above, but with the samples of `signal` being modified in-place. static int RampSignal(int16_t* signal, size_t length, int factor, int increment); - // Same as above, but processes |length| samples from |signal|, starting at - // |start_index|. + // Same as above, but processes `length` samples from `signal`, starting at + // `start_index`. 
static int RampSignal(AudioVector* signal, size_t start_index, size_t length, @@ -81,10 +80,10 @@ class DspHelper { int factor, int increment); - // Peak detection with parabolic fit. Looks for |num_peaks| maxima in |data|, - // having length |data_length| and sample rate multiplier |fs_mult|. The peak - // locations and values are written to the arrays |peak_index| and - // |peak_value|, respectively. Both arrays must hold at least |num_peaks| + // Peak detection with parabolic fit. Looks for `num_peaks` maxima in `data`, + // having length `data_length` and sample rate multiplier `fs_mult`. The peak + // locations and values are written to the arrays `peak_index` and + // `peak_value`, respectively. Both arrays must hold at least `num_peaks` // elements. static void PeakDetection(int16_t* data, size_t data_length, @@ -94,30 +93,30 @@ class DspHelper { int16_t* peak_value); // Estimates the height and location of a maximum. The three values in the - // array |signal_points| are used as basis for a parabolic fit, which is then - // used to find the maximum in an interpolated signal. The |signal_points| are + // array `signal_points` are used as basis for a parabolic fit, which is then + // used to find the maximum in an interpolated signal. The `signal_points` are // assumed to be from a 4 kHz signal, while the maximum, written to - // |peak_index| and |peak_value| is given in the full sample rate, as - // indicated by the sample rate multiplier |fs_mult|. + // `peak_index` and `peak_value` is given in the full sample rate, as + // indicated by the sample rate multiplier `fs_mult`. static void ParabolicFit(int16_t* signal_points, int fs_mult, size_t* peak_index, int16_t* peak_value); - // Calculates the sum-abs-diff for |signal| when compared to a displaced + // Calculates the sum-abs-diff for `signal` when compared to a displaced // version of itself. Returns the displacement lag that results in the minimum - // distortion. The resulting distortion is written to |distortion_value|. - // The values of |min_lag| and |max_lag| are boundaries for the search. + // distortion. The resulting distortion is written to `distortion_value`. + // The values of `min_lag` and `max_lag` are boundaries for the search. static size_t MinDistortion(const int16_t* signal, size_t min_lag, size_t max_lag, size_t length, int32_t* distortion_value); - // Mixes |length| samples from |input1| and |input2| together and writes the - // result to |output|. The gain for |input1| starts at |mix_factor| (Q14) and - // is decreased by |factor_decrement| (Q14) for each sample. The gain for - // |input2| is the complement 16384 - mix_factor. + // Mixes `length` samples from `input1` and `input2` together and writes the + // result to `output`. The gain for `input1` starts at `mix_factor` (Q14) and + // is decreased by `factor_decrement` (Q14) for each sample. The gain for + // `input2` is the complement 16384 - mix_factor. static void CrossFade(const int16_t* input1, const int16_t* input2, size_t length, @@ -125,24 +124,24 @@ class DspHelper { int16_t factor_decrement, int16_t* output); - // Scales |input| with an increasing gain. Applies |factor| (Q14) to the first - // sample and increases the gain by |increment| (Q20) for each sample. The - // result is written to |output|. |length| samples are processed. + // Scales `input` with an increasing gain. Applies `factor` (Q14) to the first + // sample and increases the gain by `increment` (Q20) for each sample. The + // result is written to `output`. 
`length` samples are processed. static void UnmuteSignal(const int16_t* input, size_t length, int16_t* factor, int increment, int16_t* output); - // Starts at unity gain and gradually fades out |signal|. For each sample, - // the gain is reduced by |mute_slope| (Q14). |length| samples are processed. + // Starts at unity gain and gradually fades out `signal`. For each sample, + // the gain is reduced by `mute_slope` (Q14). `length` samples are processed. static void MuteSignal(int16_t* signal, int mute_slope, size_t length); - // Downsamples |input| from |sample_rate_hz| to 4 kHz sample rate. The input - // has |input_length| samples, and the method will write |output_length| - // samples to |output|. Compensates for the phase delay of the downsampling - // filters if |compensate_delay| is true. Returns -1 if the input is too short - // to produce |output_length| samples, otherwise 0. + // Downsamples `input` from `sample_rate_hz` to 4 kHz sample rate. The input + // has `input_length` samples, and the method will write `output_length` + // samples to `output`. Compensates for the phase delay of the downsampling + // filters if `compensate_delay` is true. Returns -1 if the input is too short + // to produce `output_length` samples, otherwise 0. static int DownsampleTo4kHz(const int16_t* input, size_t input_length, size_t output_length, @@ -150,11 +149,12 @@ class DspHelper { bool compensate_delay, int16_t* output); + DspHelper(const DspHelper&) = delete; + DspHelper& operator=(const DspHelper&) = delete; + private: // Table of constants used in method DspHelper::ParabolicFit(). static const int16_t kParabolaCoefficients[17][3]; - - RTC_DISALLOW_COPY_AND_ASSIGN(DspHelper); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/dsp_helper_unittest.cc b/modules/audio_coding/neteq/dsp_helper_unittest.cc index ec434a4b19..09247417d3 100644 --- a/modules/audio_coding/neteq/dsp_helper_unittest.cc +++ b/modules/audio_coding/neteq/dsp_helper_unittest.cc @@ -24,7 +24,7 @@ TEST(DspHelper, RampSignalArray) { input[i] = 1000; } int start_factor = 0; - // Ramp from 0 to 1 (in Q14) over the array. Note that |increment| is in Q20, + // Ramp from 0 to 1 (in Q14) over the array. Note that `increment` is in Q20, // while the factor is in Q14, hence the shift by 6. int increment = (16384 << 6) / kLen; @@ -36,7 +36,7 @@ TEST(DspHelper, RampSignalArray) { EXPECT_EQ(1000 * i / kLen, output[i]); } - // Test second method. (Note that this modifies |input|.) + // Test second method. (Note that this modifies `input`.) stop_factor = DspHelper::RampSignal(input, kLen, start_factor, increment); EXPECT_EQ(16383, stop_factor); // Almost reach 1 in Q14. for (int i = 0; i < kLen; ++i) { @@ -54,31 +54,31 @@ TEST(DspHelper, RampSignalAudioMultiVector) { input[channel][i] = 1000; } } - // We want to start ramping at |start_index| and keep ramping for |kLen| + // We want to start ramping at `start_index` and keep ramping for `kLen` // samples. int start_index = kLen; int start_factor = 0; - // Ramp from 0 to 1 (in Q14) in |kLen| samples. Note that |increment| is in + // Ramp from 0 to 1 (in Q14) in `kLen` samples. Note that `increment` is in // Q20, while the factor is in Q14, hence the shift by 6. int increment = (16384 << 6) / kLen; int stop_factor = DspHelper::RampSignal(&input, start_index, kLen, start_factor, increment); EXPECT_EQ(16383, stop_factor); // Almost reach 1 in Q14. - // Verify that the first |kLen| samples are left untouched. + // Verify that the first `kLen` samples are left untouched. 
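  // (Ramping started at `start_index` == kLen, so the first kLen samples keep
  // their original amplitude. The Q20 `increment`, (16384 << 6) / kLen, then
  // walks the Q14 factor from 0 up to ~1 over the next kLen samples:
  // Q20 / Q14 = 2^6, hence the shift by 6.)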
int i; for (i = 0; i < kLen; ++i) { for (int channel = 0; channel < kChannels; ++channel) { EXPECT_EQ(1000, input[channel][i]); } } - // Verify that the next block of |kLen| samples are ramped. + // Verify that the next block of `kLen` samples are ramped. for (; i < 2 * kLen; ++i) { for (int channel = 0; channel < kChannels; ++channel) { EXPECT_EQ(1000 * (i - kLen) / kLen, input[channel][i]); } } - // Verify the last |kLen| samples are left untouched. + // Verify the last `kLen` samples are left untouched. for (; i < 3 * kLen; ++i) { for (int channel = 0; channel < kChannels; ++channel) { EXPECT_EQ(1000, input[channel][i]); diff --git a/modules/audio_coding/neteq/dtmf_buffer.cc b/modules/audio_coding/neteq/dtmf_buffer.cc index f81036b85b..9f78aca6e2 100644 --- a/modules/audio_coding/neteq/dtmf_buffer.cc +++ b/modules/audio_coding/neteq/dtmf_buffer.cc @@ -32,7 +32,7 @@ void DtmfBuffer::Flush() { buffer_.clear(); } -// The ParseEvent method parses 4 bytes from |payload| according to this format +// The ParseEvent method parses 4 bytes from `payload` according to this format // from RFC 4733: // // 0 1 2 3 @@ -119,8 +119,8 @@ int DtmfBuffer::InsertEvent(const DtmfEvent& event) { bool DtmfBuffer::GetEvent(uint32_t current_timestamp, DtmfEvent* event) { DtmfList::iterator it = buffer_.begin(); while (it != buffer_.end()) { - // |event_end| is an estimate of where the current event ends. If the end - // bit is set, we know that the event ends at |timestamp| + |duration|. + // `event_end` is an estimate of where the current event ends. If the end + // bit is set, we know that the event ends at `timestamp` + `duration`. uint32_t event_end = it->timestamp + it->duration; #ifdef LEGACY_BITEXACT bool next_available = false; @@ -226,7 +226,7 @@ bool DtmfBuffer::MergeEvents(DtmfList::iterator it, const DtmfEvent& event) { } } -// Returns true if |a| goes before |b| in the sorting order ("|a| < |b|"). +// Returns true if `a` goes before `b` in the sorting order ("`a` < `b`"). // The events are ranked using their start timestamp (taking wrap-around into // account). In the unlikely situation that two events share the same start // timestamp, the event number is used to rank the two. Note that packets diff --git a/modules/audio_coding/neteq/dtmf_buffer.h b/modules/audio_coding/neteq/dtmf_buffer.h index 6bf75e1c13..62b751525c 100644 --- a/modules/audio_coding/neteq/dtmf_buffer.h +++ b/modules/audio_coding/neteq/dtmf_buffer.h @@ -16,8 +16,6 @@ #include -#include "rtc_base/constructor_magic.h" - namespace webrtc { struct DtmfEvent { @@ -45,29 +43,32 @@ class DtmfBuffer { kInvalidSampleRate }; - // Set up the buffer for use at sample rate |fs_hz|. + // Set up the buffer for use at sample rate `fs_hz`. explicit DtmfBuffer(int fs_hz); virtual ~DtmfBuffer(); + DtmfBuffer(const DtmfBuffer&) = delete; + DtmfBuffer& operator=(const DtmfBuffer&) = delete; + // Flushes the buffer. virtual void Flush(); - // Static method to parse 4 bytes from |payload| as a DTMF event (RFC 4733) - // and write the parsed information into the struct |event|. Input variable - // |rtp_timestamp| is simply copied into the struct. + // Static method to parse 4 bytes from `payload` as a DTMF event (RFC 4733) + // and write the parsed information into the struct `event`. Input variable + // `rtp_timestamp` is simply copied into the struct. static int ParseEvent(uint32_t rtp_timestamp, const uint8_t* payload, size_t payload_length_bytes, DtmfEvent* event); - // Inserts |event| into the buffer. 
The method looks for a matching event and + // Inserts `event` into the buffer. The method looks for a matching event and // merges the two if a match is found. virtual int InsertEvent(const DtmfEvent& event); - // Checks if a DTMF event should be played at time |current_timestamp|. If so, + // Checks if a DTMF event should be played at time `current_timestamp`. If so, // the method returns true; otherwise false. The parameters of the event to - // play will be written to |event|. + // play will be written to `event`. virtual bool GetEvent(uint32_t current_timestamp, DtmfEvent* event); // Number of events in the buffer. @@ -87,7 +88,7 @@ class DtmfBuffer { // Compares two events and returns true if they are the same. static bool SameEvent(const DtmfEvent& a, const DtmfEvent& b); - // Merges |event| to the event pointed out by |it|. The method checks that + // Merges `event` to the event pointed out by `it`. The method checks that // the two events are the same (using the SameEvent method), and merges them // if that was the case, returning true. If the events are not the same, false // is returned. @@ -97,8 +98,6 @@ class DtmfBuffer { static bool CompareEvents(const DtmfEvent& a, const DtmfEvent& b); DtmfList buffer_; - - RTC_DISALLOW_COPY_AND_ASSIGN(DtmfBuffer); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/dtmf_buffer_unittest.cc b/modules/audio_coding/neteq/dtmf_buffer_unittest.cc index 607a5ec6fa..83745b6c09 100644 --- a/modules/audio_coding/neteq/dtmf_buffer_unittest.cc +++ b/modules/audio_coding/neteq/dtmf_buffer_unittest.cc @@ -208,12 +208,12 @@ TEST(DtmfBuffer, ExtrapolationTime) { DtmfEvent event2(timestamp, event_no, volume, duration, end_bit); EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event2)); EXPECT_EQ(2u, buffer.Length()); - // Now we expect to get the new event when supplying |timestamp_now|. + // Now we expect to get the new event when supplying `timestamp_now`. EXPECT_TRUE(buffer.GetEvent(timestamp_now, &out_event)); EXPECT_TRUE(EqualEvents(event2, out_event)); // Expect the the first event to be erased now. EXPECT_EQ(1u, buffer.Length()); - // Move |timestamp_now| to more than 560 samples after the end of the second + // Move `timestamp_now` to more than 560 samples after the end of the second // event. Expect that event to be erased. timestamp_now = timestamp + duration + 600; #ifdef LEGACY_BITEXACT diff --git a/modules/audio_coding/neteq/dtmf_tone_generator.cc b/modules/audio_coding/neteq/dtmf_tone_generator.cc index 6c412e364d..9061e27c67 100644 --- a/modules/audio_coding/neteq/dtmf_tone_generator.cc +++ b/modules/audio_coding/neteq/dtmf_tone_generator.cc @@ -119,7 +119,7 @@ int DtmfToneGenerator::Init(int fs, int event, int attenuation) { } else if (fs == 48000) { fs_index = 3; } else { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); fs_index = 1; // Default to 8000 Hz. } @@ -167,7 +167,7 @@ void DtmfToneGenerator::Reset() { initialized_ = false; } -// Generate num_samples of DTMF signal and write to |output|. +// Generate num_samples of DTMF signal and write to `output`. 
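// (A DTMF event is the sum of two sine tones, one from the low and one from
// the high frequency group. Generate() advances two recursive oscillators,
// whose last two samples are kept in sample_history1_ and sample_history2_,
// and mixes their outputs at the attenuation configured in Init().)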
int DtmfToneGenerator::Generate(size_t num_samples, AudioMultiVector* output) { if (!initialized_) { return kNotInitialized; diff --git a/modules/audio_coding/neteq/dtmf_tone_generator.h b/modules/audio_coding/neteq/dtmf_tone_generator.h index 968bc7f8c7..35114f4f49 100644 --- a/modules/audio_coding/neteq/dtmf_tone_generator.h +++ b/modules/audio_coding/neteq/dtmf_tone_generator.h @@ -15,7 +15,6 @@ #include #include "modules/audio_coding/neteq/audio_multi_vector.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -29,6 +28,10 @@ class DtmfToneGenerator { DtmfToneGenerator(); virtual ~DtmfToneGenerator() {} + + DtmfToneGenerator(const DtmfToneGenerator&) = delete; + DtmfToneGenerator& operator=(const DtmfToneGenerator&) = delete; + virtual int Init(int fs, int event, int attenuation); virtual void Reset(); virtual int Generate(size_t num_samples, AudioMultiVector* output); @@ -48,8 +51,6 @@ class DtmfToneGenerator { int amplitude_; // Amplitude for this event. int16_t sample_history1_[2]; // Last 2 samples for the 1st oscillator. int16_t sample_history2_[2]; // Last 2 samples for the 2nd oscillator. - - RTC_DISALLOW_COPY_AND_ASSIGN(DtmfToneGenerator); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/expand.cc b/modules/audio_coding/neteq/expand.cc index ffaa4c74aa..9c3274609f 100644 --- a/modules/audio_coding/neteq/expand.cc +++ b/modules/audio_coding/neteq/expand.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/expand.h" -#include #include // memset #include // min, max @@ -168,7 +167,7 @@ int Expand::Process(AudioMultiVector* output) { } // Smooth the expanded if it has not been muted to a low amplitude and - // |current_voice_mix_factor| is larger than 0.5. + // `current_voice_mix_factor` is larger than 0.5. if ((parameters.mute_factor > 819) && (parameters.current_voice_mix_factor > 8192)) { size_t start_ix = sync_buffer_->Size() - overlap_length_; @@ -198,7 +197,7 @@ int Expand::Process(AudioMultiVector* output) { } // Unvoiced part. - // Filter |scaled_random_vector| through |ar_filter_|. + // Filter `scaled_random_vector` through `ar_filter_`. memcpy(unvoiced_vector - kUnvoicedLpcOrder, parameters.ar_filter_state, sizeof(int16_t) * kUnvoicedLpcOrder); int32_t add_constant = 0; @@ -403,7 +402,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { // Calculate correlation in downsampled domain (4 kHz sample rate). size_t correlation_length = 51; // TODO(hlundin): Legacy bit-exactness. - // If it is decided to break bit-exactness |correlation_length| should be + // If it is decided to break bit-exactness `correlation_length` should be // initialized to the return value of Correlation(). Correlation(audio_history.get(), signal_length, correlation_vector); @@ -418,7 +417,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { best_correlation_index[1] += fs_mult_20; best_correlation_index[2] += fs_mult_20; - // Calculate distortion around the |kNumCorrelationCandidates| best lags. + // Calculate distortion around the `kNumCorrelationCandidates` best lags. int distortion_scale = 0; for (size_t i = 0; i < kNumCorrelationCandidates; i++) { size_t min_index = @@ -435,7 +434,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { WebRtcSpl_VectorBitShiftW32ToW16(best_distortion, kNumCorrelationCandidates, best_distortion_w32, distortion_scale); - // Find the maximizing index |i| of the cost function + // Find the maximizing index `i` of the cost function // f[i] = best_correlation[i] / best_distortion[i]. 
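  // (Fixed-point note: the ratio is evaluated by scaling each correlation up
  // before an integer division by the corresponding distortion, rather than
  // in floating point; a candidate with zero distortion counts as maximal.)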
int32_t best_ratio = std::numeric_limits::min(); size_t best_index = std::numeric_limits::max(); @@ -459,7 +458,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { max_lag_ = std::max(distortion_lag, correlation_lag); // Calculate the exact best correlation in the range between - // |correlation_lag| and |distortion_lag|. + // `correlation_lag` and `distortion_lag`. correlation_length = std::max(std::min(distortion_lag + 10, fs_mult_120), static_cast(60 * fs_mult)); @@ -488,7 +487,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { (31 - WebRtcSpl_NormW32(static_cast(correlation_length))) - 31; correlation_scale = std::max(0, correlation_scale); - // Calculate the correlation, store in |correlation_vector2|. + // Calculate the correlation, store in `correlation_vector2`. WebRtcSpl_CrossCorrelation( correlation_vector2, &(audio_history[signal_length - correlation_length]), @@ -538,7 +537,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { } // Extract the two vectors expand_vector0 and expand_vector1 from - // |audio_history|. + // `audio_history`. size_t expansion_length = max_lag_ + overlap_length_; const int16_t* vector1 = &(audio_history[signal_length - expansion_length]); const int16_t* vector2 = vector1 - distortion_lag; @@ -595,13 +594,13 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { expand_lags_[1] = distortion_lag; expand_lags_[2] = distortion_lag; } else { - // |distortion_lag| and |correlation_lag| are not equal; use different + // `distortion_lag` and `correlation_lag` are not equal; use different // combinations of the two. - // First lag is |distortion_lag| only. + // First lag is `distortion_lag` only. expand_lags_[0] = distortion_lag; // Second lag is the average of the two. expand_lags_[1] = (distortion_lag + correlation_lag) / 2; - // Third lag is the average again, but rounding towards |correlation_lag|. + // Third lag is the average again, but rounding towards `correlation_lag`. if (distortion_lag > correlation_lag) { expand_lags_[2] = (distortion_lag + correlation_lag - 1) / 2; } else { @@ -639,7 +638,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { if (stability != 1) { // Set first coefficient to 4096 (1.0 in Q12). parameters.ar_filter[0] = 4096; - // Set remaining |kUnvoicedLpcOrder| coefficients to zero. + // Set remaining `kUnvoicedLpcOrder` coefficients to zero. WebRtcSpl_MemSetW16(parameters.ar_filter + 1, 0, kUnvoicedLpcOrder); } } @@ -657,7 +656,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { sizeof(int16_t) * noise_length); } else { // Only applies to SWB where length could be larger than - // |kRandomTableSize|. + // `kRandomTableSize`. memcpy(random_vector, RandomVector::kRandomTable, sizeof(int16_t) * RandomVector::kRandomTableSize); RTC_DCHECK_LE(noise_length, kMaxSampleRate / 8000 * 120 + 30); @@ -695,7 +694,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { int32_t unvoiced_energy = WebRtcSpl_DotProductWithScale( unvoiced_vector, unvoiced_vector, 128, unvoiced_prescale); - // Normalize |unvoiced_energy| to 28 or 29 bits to preserve sqrt() accuracy. + // Normalize `unvoiced_energy` to 28 or 29 bits to preserve sqrt() accuracy. int16_t unvoiced_scale = WebRtcSpl_NormW32(unvoiced_energy) - 3; // Make sure we do an odd number of shifts since we already have 7 shifts // from dividing with 128 earlier. 
This will make the total scale factor @@ -716,7 +715,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { // voice_mix_factor = 0; if (corr_coefficient > 7875) { int16_t x1, x2, x3; - // |corr_coefficient| is in Q14. + // `corr_coefficient` is in Q14. x1 = static_cast(corr_coefficient); x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14. x3 = (x1 * x2) >> 14; @@ -734,13 +733,13 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { } // Calculate muting slope. Reuse value from earlier scaling of - // |expand_vector0| and |expand_vector1|. + // `expand_vector0` and `expand_vector1`. int16_t slope = amplitude_ratio; if (slope > 12288) { // slope > 1.5. // Calculate (1 - (1 / slope)) / distortion_lag = // (slope - 1) / (distortion_lag * slope). - // |slope| is in Q13, so 1 corresponds to 8192. Shift up to Q25 before + // `slope` is in Q13, so 1 corresponds to 8192. Shift up to Q25 before // the division. // Shift the denominator from Q13 to Q5 before the division. The result of // the division will then be in Q20. @@ -758,7 +757,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) { parameters.onset = true; } else { // Calculate (1 - slope) / distortion_lag. - // Shift |slope| by 7 to Q20 before the division. The result is in Q20. + // Shift `slope` by 7 to Q20 before the division. The result is in Q20. parameters.mute_slope = WebRtcSpl_DivW32W16( (8192 - slope) * 128, static_cast(distortion_lag)); if (parameters.voice_mix_factor <= 13107) { @@ -827,7 +826,7 @@ void Expand::Correlation(const int16_t* input, kDownsampledLength, filter_coefficients, num_coefficients, downsampling_factor, kFilterDelay); - // Normalize |downsampled_input| to using all 16 bits. + // Normalize `downsampled_input` to using all 16 bits. int16_t max_value = WebRtcSpl_MaxAbsValueW16(downsampled_input, kDownsampledLength); int16_t norm_shift = 16 - WebRtcSpl_NormW32(max_value); diff --git a/modules/audio_coding/neteq/expand.h b/modules/audio_coding/neteq/expand.h index 3b0cea3d93..2e64583ec2 100644 --- a/modules/audio_coding/neteq/expand.h +++ b/modules/audio_coding/neteq/expand.h @@ -11,12 +11,10 @@ #ifndef MODULES_AUDIO_CODING_NETEQ_EXPAND_H_ #define MODULES_AUDIO_CODING_NETEQ_EXPAND_H_ -#include #include #include "modules/audio_coding/neteq/audio_vector.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -42,11 +40,14 @@ class Expand { virtual ~Expand(); + Expand(const Expand&) = delete; + Expand& operator=(const Expand&) = delete; + // Resets the object. virtual void Reset(); // The main method to produce concealment data. The data is appended to the - // end of |output|. + // end of `output`. virtual int Process(AudioMultiVector* output); // Prepare the object to do extra expansion during normal operation following @@ -57,7 +58,7 @@ class Expand { // a period of expands. virtual void SetParametersForMergeAfterExpand(); - // Returns the mute factor for |channel|. + // Returns the mute factor for `channel`. int16_t MuteFactor(size_t channel) const { RTC_DCHECK_LT(channel, num_channels_); return channel_parameters_[channel].mute_factor; @@ -82,7 +83,7 @@ class Expand { bool TooManyExpands(); - // Analyzes the signal history in |sync_buffer_|, and set up all parameters + // Analyzes the signal history in `sync_buffer_`, and set up all parameters // necessary to produce concealment data. 
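  // (As seen in expand.cc above, this involves downsampling the history to
  // 4 kHz, correlating to find candidate pitch lags, refining them with a
  // distortion measure, and deriving the voiced/unvoiced mix factor, mute
  // slope and AR filter used for the noise part of the concealment signal.)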
void AnalyzeSignal(int16_t* random_vector); @@ -116,9 +117,9 @@ class Expand { int mute_slope; /* Q20 */ }; - // Calculate the auto-correlation of |input|, with length |input_length| + // Calculate the auto-correlation of `input`, with length `input_length` // samples. The correlation is calculated from a downsampled version of - // |input|, and is written to |output|. + // `input`, and is written to `output`. void Correlation(const int16_t* input, size_t input_length, int16_t* output) const; @@ -135,8 +136,6 @@ class Expand { bool stop_muting_; size_t expand_duration_samples_; std::unique_ptr channel_parameters_; - - RTC_DISALLOW_COPY_AND_ASSIGN(Expand); }; struct ExpandFactory { diff --git a/modules/audio_coding/neteq/expand_uma_logger.h b/modules/audio_coding/neteq/expand_uma_logger.h index 246aaffd4f..a29d3532f3 100644 --- a/modules/audio_coding/neteq/expand_uma_logger.h +++ b/modules/audio_coding/neteq/expand_uma_logger.h @@ -17,7 +17,6 @@ #include "absl/types/optional.h" #include "api/neteq/tick_timer.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -36,6 +35,9 @@ class ExpandUmaLogger { ~ExpandUmaLogger(); + ExpandUmaLogger(const ExpandUmaLogger&) = delete; + ExpandUmaLogger& operator=(const ExpandUmaLogger&) = delete; + // In this call, value should be an incremental sample counter. The sample // rate must be strictly positive. void UpdateSampleCounter(uint64_t value, int sample_rate_hz); @@ -48,8 +50,6 @@ class ExpandUmaLogger { absl::optional last_logged_value_; uint64_t last_value_ = 0; int sample_rate_hz_ = 0; - - RTC_DISALLOW_COPY_AND_ASSIGN(ExpandUmaLogger); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/expand_unittest.cc b/modules/audio_coding/neteq/expand_unittest.cc index 55a8866564..9355fce5e1 100644 --- a/modules/audio_coding/neteq/expand_unittest.cc +++ b/modules/audio_coding/neteq/expand_unittest.cc @@ -124,7 +124,7 @@ TEST_F(ExpandTest, DelayedPacketOutage) { EXPECT_EQ(0, statistics_.last_outage_duration_samples()); } expand_.SetParametersForNormalAfterExpand(); - // Convert |sum_output_len_samples| to milliseconds. + // Convert `sum_output_len_samples` to milliseconds. EXPECT_EQ(rtc::checked_cast(sum_output_len_samples), statistics_.last_outage_duration_samples()); } @@ -135,11 +135,9 @@ TEST_F(ExpandTest, DelayedPacketOutage) { // arrived before it). TEST_F(ExpandTest, LostPacketOutage) { AudioMultiVector output(num_channels_); - size_t sum_output_len_samples = 0; for (int i = 0; i < 10; ++i) { EXPECT_EQ(0, expand_.Process(&output)); EXPECT_GT(output.Size(), 0u); - sum_output_len_samples += output.Size(); EXPECT_EQ(0, statistics_.last_outage_duration_samples()); } expand_.SetParametersForMergeAfterExpand(); @@ -164,7 +162,7 @@ TEST_F(ExpandTest, CheckOutageStatsAfterReset) { EXPECT_EQ(0, statistics_.last_outage_duration_samples()); } expand_.SetParametersForNormalAfterExpand(); - // Convert |sum_output_len_samples| to milliseconds. + // Convert `sum_output_len_samples` to milliseconds. EXPECT_EQ(rtc::checked_cast(sum_output_len_samples), statistics_.last_outage_duration_samples()); } diff --git a/modules/audio_coding/neteq/g3doc/index.md b/modules/audio_coding/neteq/g3doc/index.md index d0624f46ef..e97324d89d 100644 --- a/modules/audio_coding/neteq/g3doc/index.md +++ b/modules/audio_coding/neteq/g3doc/index.md @@ -13,13 +13,13 @@ time keep the delay as low as possible. 
## API At a high level, the NetEq API has two main functions: -[`InsertPacket`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/neteq/neteq.h;l=198;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72) +[`InsertPacket`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=198;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72) and -[`GetAudio`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/neteq/neteq.h;l=219;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72). +[`GetAudio`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=219;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72). ### InsertPacket -[`InsertPacket`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/neteq/neteq.h;l=198;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72) +[`InsertPacket`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=198;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72) delivers an RTP packet from the network to NetEq where the following happens: 1. The packet is discarded if it is too late for playout (for example if it was @@ -33,7 +33,7 @@ delivers an RTP packet from the network to NetEq where the following happens: ### GetAudio -[`GetAudio`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/neteq/neteq.h;l=219;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72) +[`GetAudio`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=219;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72) pulls 10 ms of audio from NetEq for playout. A much simplified decision logic is as follows: @@ -68,23 +68,23 @@ There are a number of functions that can be used to query the internal state of NetEq, statistics about the type of audio output and latency metrics such as how long time packets have waited in the buffer. -* [`NetworkStatistics`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/neteq/neteq.h;l=273;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72): +* [`NetworkStatistics`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=273;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72): instantaneous values or stats averaged over the duration since last call to this function. -* [`GetLifetimeStatistics`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/neteq/neteq.h;l=280;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72): +* [`GetLifetimeStatistics`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=280;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72): cumulative stats that persist over the lifetime of the class. -* [`GetOperationsAndState`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/neteq/neteq.h;l=284;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72): +* [`GetOperationsAndState`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/neteq/neteq.h;l=284;drc=4461f059d180fe8c2886d422ebd1cb55b5c83e72): information about the internal state of NetEq (is only inteded to be used for testing and debugging). 
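To tie this section together, the two calls above are typically driven from two different paths: the receive path pushes packets in, and the playout path pulls 10 ms frames out. The sketch below illustrates that pattern under assumed, simplified signatures; the exact parameter lists of `InsertPacket` and `GetAudio` vary between WebRTC versions, so consult `api/neteq/neteq.h` rather than copying this verbatim.

```c++
#include "api/array_view.h"
#include "api/audio/audio_frame.h"
#include "api/neteq/neteq.h"
#include "api/rtp_headers.h"

// Sketch only: feed each received RTP packet in, pull 10 ms out per tick.
// Construction of the NetEq instance and error handling are omitted.
void OnRtpPacket(webrtc::NetEq& neteq,
                 const webrtc::RTPHeader& header,
                 rtc::ArrayView<const uint8_t> payload) {
  // NetEq may discard the packet, e.g. if it arrived too late for playout.
  neteq.InsertPacket(header, payload);
}

void OnPlayoutTick(webrtc::NetEq& neteq, webrtc::AudioFrame* frame) {
  bool muted = false;
  // Fills `frame` with 10 ms of audio, internally choosing between normal
  // decode and the time-stretching/concealment operations outlined above.
  neteq.GetAudio(frame, &muted);
}
```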
## Tests and tools -* [`neteq_rtpplay`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc;drc=cee751abff598fc19506f77de08bea7c61b9dcca): +* [`neteq_rtpplay`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc;drc=cee751abff598fc19506f77de08bea7c61b9dcca): Simulate NetEq behavior based on either an RTP dump, a PCAP file or an RTC event log. A replacement audio file can also be used instead of the original payload. Outputs aggregated statistics and optionally an audio file to listen to. -* [`neteq_speed_test`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_coding/neteq/test/neteq_speed_test.cc;drc=2ab97f6f8e27b47c0d9beeb8b6ca5387bda9f55c): +* [`neteq_speed_test`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_coding/neteq/test/neteq_speed_test.cc;drc=2ab97f6f8e27b47c0d9beeb8b6ca5387bda9f55c): Measure performance of NetEq, used on perf bots. * Unit tests including bit exactness tests where RTP file is used as an input to NetEq, the output is concatenated and a checksum is calculated and diff --git a/modules/audio_coding/neteq/histogram.cc b/modules/audio_coding/neteq/histogram.cc index 15a2394eb0..e4b7f10379 100644 --- a/modules/audio_coding/neteq/histogram.cc +++ b/modules/audio_coding/neteq/histogram.cc @@ -34,42 +34,42 @@ Histogram::Histogram(size_t num_buckets, Histogram::~Histogram() {} // Each element in the vector is first multiplied by the forgetting factor -// |forget_factor_|. Then the vector element indicated by |iat_packets| is then -// increased (additive) by 1 - |forget_factor_|. This way, the probability of -// |value| is slightly increased, while the sum of the histogram remains +// `forget_factor_`. Then the vector element indicated by `iat_packets` is then +// increased (additive) by 1 - `forget_factor_`. This way, the probability of +// `value` is slightly increased, while the sum of the histogram remains // constant (=1). // Due to inaccuracies in the fixed-point arithmetic, the histogram may no // longer sum up to 1 (in Q30) after the update. To correct this, a correction // term is added or subtracted from the first element (or elements) of the // vector. -// The forgetting factor |forget_factor_| is also updated. When the DelayManager +// The forgetting factor `forget_factor_` is also updated. When the DelayManager // is reset, the factor is set to 0 to facilitate rapid convergence in the // beginning. With each update of the histogram, the factor is increased towards -// the steady-state value |base_forget_factor_|. +// the steady-state value `base_forget_factor_`. void Histogram::Add(int value) { RTC_DCHECK(value >= 0); RTC_DCHECK(value < static_cast(buckets_.size())); int vector_sum = 0; // Sum up the vector elements as they are processed. - // Multiply each element in |buckets_| with |forget_factor_|. + // Multiply each element in `buckets_` with `forget_factor_`. for (int& bucket : buckets_) { bucket = (static_cast(bucket) * forget_factor_) >> 15; vector_sum += bucket; } // Increase the probability for the currently observed inter-arrival time - // by 1 - |forget_factor_|. The factor is in Q15, |buckets_| in Q30. + // by 1 - `forget_factor_`. The factor is in Q15, `buckets_` in Q30. // Thus, left-shift 15 steps to obtain result in Q30. 
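  // ((32768 - forget_factor_) is 1 - forget_factor in Q15; the left-shift by
  // 15 rescales it to Q30 so it can be added directly to the Q30 buckets.)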
buckets_[value] += (32768 - forget_factor_) << 15; vector_sum += (32768 - forget_factor_) << 15; // Add to vector sum. - // |buckets_| should sum up to 1 (in Q30), but it may not due to + // `buckets_` should sum up to 1 (in Q30), but it may not due to // fixed-point rounding errors. vector_sum -= 1 << 30; // Should be zero. Compensate if not. if (vector_sum != 0) { - // Modify a few values early in |buckets_|. + // Modify a few values early in `buckets_`. int flip_sign = vector_sum > 0 ? -1 : 1; for (int& bucket : buckets_) { - // Add/subtract 1/16 of the element, but not more than |vector_sum|. + // Add/subtract 1/16 of the element, but not more than `vector_sum`. int correction = flip_sign * std::min(std::abs(vector_sum), bucket >> 4); bucket += correction; vector_sum += correction; @@ -82,8 +82,8 @@ void Histogram::Add(int value) { ++add_count_; - // Update |forget_factor_| (changes only during the first seconds after a - // reset). The factor converges to |base_forget_factor_|. + // Update `forget_factor_` (changes only during the first seconds after a + // reset). The factor converges to `base_forget_factor_`. if (start_forget_weight_) { if (forget_factor_ != base_forget_factor_) { int old_forget_factor = forget_factor_; @@ -92,7 +92,7 @@ void Histogram::Add(int value) { forget_factor_ = std::max(0, std::min(base_forget_factor_, forget_factor)); // The histogram is updated recursively by forgetting the old histogram - // with |forget_factor_| and adding a new sample multiplied by |1 - + // with `forget_factor_` and adding a new sample multiplied by |1 - // forget_factor_|. We need to make sure that the effective weight on the // new sample is no smaller than those on the old samples, i.e., to // satisfy the following DCHECK. @@ -106,21 +106,21 @@ void Histogram::Add(int value) { int Histogram::Quantile(int probability) { // Find the bucket for which the probability of observing an - // inter-arrival time larger than or equal to |index| is larger than or - // equal to |probability|. The sought probability is estimated using + // inter-arrival time larger than or equal to `index` is larger than or + // equal to `probability`. The sought probability is estimated using // the histogram as the reverse cumulant PDF, i.e., the sum of elements from - // the end up until |index|. Now, since the sum of all elements is 1 + // the end up until `index`. Now, since the sum of all elements is 1 // (in Q30) by definition, and since the solution is often a low value for - // |iat_index|, it is more efficient to start with |sum| = 1 and subtract + // `iat_index`, it is more efficient to start with `sum` = 1 and subtract // elements from the start of the histogram. int inverse_probability = (1 << 30) - probability; - size_t index = 0; // Start from the beginning of |buckets_|. + size_t index = 0; // Start from the beginning of `buckets_`. int sum = 1 << 30; // Assign to 1 in Q30. sum -= buckets_[index]; while ((sum > inverse_probability) && (index < buckets_.size() - 1)) { // Subtract the probabilities one by one until the sum is no longer greater - // than |inverse_probability|. + // than `inverse_probability`. ++index; sum -= buckets_[index]; } diff --git a/modules/audio_coding/neteq/histogram.h b/modules/audio_coding/neteq/histogram.h index 0567e3f183..265a10e00a 100644 --- a/modules/audio_coding/neteq/histogram.h +++ b/modules/audio_coding/neteq/histogram.h @@ -21,7 +21,7 @@ namespace webrtc { class Histogram { public: - // Creates histogram with capacity |num_buckets| and |forget_factor| in Q15. 
+ // Creates histogram with capacity `num_buckets` and `forget_factor` in Q15. Histogram(size_t num_buckets, int forget_factor, absl::optional start_forget_weight = absl::nullopt); @@ -31,10 +31,10 @@ class Histogram { // Resets the histogram to the default start distribution. virtual void Reset(); - // Add entry in bucket |index|. + // Add entry in bucket `index`. virtual void Add(int index); - // Calculates the quantile at |probability| (in Q30) of the histogram + // Calculates the quantile at `probability` (in Q30) of the histogram // distribution. virtual int Quantile(int probability); @@ -42,7 +42,7 @@ class Histogram { virtual int NumBuckets() const; // Returns the probability for each bucket in Q30. - std::vector buckets() const { return buckets_; } + const std::vector& buckets() const { return buckets_; } // Accessors only intended for testing purposes. int base_forget_factor_for_testing() const { return base_forget_factor_; } diff --git a/modules/audio_coding/neteq/merge.cc b/modules/audio_coding/neteq/merge.cc index 770e2e3590..22cf6a7754 100644 --- a/modules/audio_coding/neteq/merge.cc +++ b/modules/audio_coding/neteq/merge.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/merge.h" -#include #include // memmove, memcpy, memset, size_t #include // min, max @@ -150,13 +149,13 @@ size_t Merge::Process(int16_t* input, (*output)[channel].OverwriteAt(temp_data_.data(), output_length, 0); } - // Copy back the first part of the data to |sync_buffer_| and remove it from - // |output|. + // Copy back the first part of the data to `sync_buffer_` and remove it from + // `output`. sync_buffer_->ReplaceAtIndex(*output, old_length, sync_buffer_->next_index()); output->PopFront(old_length); - // Return new added length. |old_length| samples were borrowed from - // |sync_buffer_|. + // Return new added length. `old_length` samples were borrowed from + // `sync_buffer_`. RTC_DCHECK_GE(output_length, old_length); return output_length - old_length; } @@ -201,7 +200,7 @@ size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) { // Append one more pitch period each time. expanded_.PushBack(expanded_temp); } - // Trim the length to exactly |required_length|. + // Trim the length to exactly `required_length`. expanded_.PopBack(expanded_.Size() - required_length); } RTC_DCHECK_GE(expanded_.Size(), required_length); @@ -241,17 +240,17 @@ int16_t Merge::SignalScaling(const int16_t* input, // Calculate muting factor to use for new frame. int16_t mute_factor; if (energy_input > energy_expanded) { - // Normalize |energy_input| to 14 bits. + // Normalize `energy_input` to 14 bits. int16_t temp_shift = WebRtcSpl_NormW32(energy_input) - 17; energy_input = WEBRTC_SPL_SHIFT_W32(energy_input, temp_shift); - // Put |energy_expanded| in a domain 14 higher, so that + // Put `energy_expanded` in a domain 14 higher, so that // energy_expanded / energy_input is in Q14. energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14); // Calculate sqrt(energy_expanded / energy_input) in Q14. mute_factor = static_cast( WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14)); } else { - // Set to 1 (in Q14) when |expanded| has higher energy than |input|. + // Set to 1 (in Q14) when `expanded` has higher energy than `input`. mute_factor = 16384; } @@ -296,7 +295,7 @@ void Merge::Downsample(const int16_t* input, // there is not much we can do. const size_t temp_len = input_length > signal_offset ? 
input_length - signal_offset : 0; - // TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off + // TODO(hlundin): Should `downsamp_temp_len` be corrected for round-off // errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor? size_t downsamp_temp_len = temp_len / decimation_factor; if (downsamp_temp_len > 0) { @@ -352,8 +351,8 @@ size_t Merge::CorrelateAndPeakSearch(size_t start_position, // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.) size_t start_index_downsamp = start_index / (fs_mult_ * 2); - // Calculate a modified |stop_position_downsamp| to account for the increased - // start index |start_index_downsamp| and the effective array length. + // Calculate a modified `stop_position_downsamp` to account for the increased + // start index `start_index_downsamp` and the effective array length. size_t modified_stop_pos = std::min(stop_position_downsamp, kMaxCorrelationLength + pad_length - start_index_downsamp); @@ -373,7 +372,7 @@ size_t Merge::CorrelateAndPeakSearch(size_t start_position, while (((best_correlation_index + input_length) < (timestamps_per_call_ + expand_->overlap_length())) || ((best_correlation_index + input_length) < start_position)) { - RTC_NOTREACHED(); // Should never happen. + RTC_DCHECK_NOTREACHED(); // Should never happen. best_correlation_index += expand_period; // Jump one lag ahead. } return best_correlation_index; diff --git a/modules/audio_coding/neteq/merge.h b/modules/audio_coding/neteq/merge.h index a062a95f6b..2f27106bfe 100644 --- a/modules/audio_coding/neteq/merge.h +++ b/modules/audio_coding/neteq/merge.h @@ -12,7 +12,6 @@ #define MODULES_AUDIO_CODING_NETEQ_MERGE_H_ #include "modules/audio_coding/neteq/audio_multi_vector.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -36,11 +35,14 @@ class Merge { SyncBuffer* sync_buffer); virtual ~Merge(); + Merge(const Merge&) = delete; + Merge& operator=(const Merge&) = delete; + // The main method to produce the audio data. The decoded data is supplied in - // |input|, having |input_length| samples in total for all channels - // (interleaved). The result is written to |output|. The number of channels - // allocated in |output| defines the number of channels that will be used when - // de-interleaving |input|. + // `input`, having `input_length` samples in total for all channels + // (interleaved). The result is written to `output`. The number of channels + // allocated in `output` defines the number of channels that will be used when + // de-interleaving `input`. virtual size_t Process(int16_t* input, size_t input_length, AudioMultiVector* output); @@ -57,29 +59,29 @@ class Merge { static const size_t kInputDownsampLength = 40; static const size_t kMaxCorrelationLength = 60; - // Calls |expand_| to get more expansion data to merge with. The data is - // written to |expanded_signal_|. Returns the length of the expanded data, - // while |expand_period| will be the number of samples in one expansion period - // (typically one pitch period). The value of |old_length| will be the number - // of samples that were taken from the |sync_buffer_|. + // Calls `expand_` to get more expansion data to merge with. The data is + // written to `expanded_signal_`. Returns the length of the expanded data, + // while `expand_period` will be the number of samples in one expansion period + // (typically one pitch period). The value of `old_length` will be the number + // of samples that were taken from the `sync_buffer_`. 
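Two things worth calling out in the hunks above: the merge.h change replaces the RTC_DISALLOW_COPY_AND_ASSIGN macro with C++11 deleted members right after the destructor, and SignalScaling() computes its muting factor as sqrt(energy_expanded / energy_input) in Q14. A floating-point paraphrase of the latter (hypothetical helper, not the WebRtcSpl fixed-point code):

#include <cmath>
#include <cstdint>

int16_t MuteFactorQ14(int32_t energy_input, int32_t energy_expanded) {
  if (energy_input <= energy_expanded) {
    return 16384;  // 1.0 in Q14: the expanded signal is at least as strong.
  }
  // sqrt(energy_expanded / energy_input) < 1.0, expressed in Q14.
  const double ratio = static_cast<double>(energy_expanded) / energy_input;
  return static_cast<int16_t>(std::sqrt(ratio) * (1 << 14));
}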
size_t GetExpandedSignal(size_t* old_length, size_t* expand_period); - // Analyzes |input| and |expanded_signal| and returns muting factor (Q14) to + // Analyzes `input` and `expanded_signal` and returns muting factor (Q14) to // be used on the new data. int16_t SignalScaling(const int16_t* input, size_t input_length, const int16_t* expanded_signal) const; - // Downsamples |input| (|input_length| samples) and |expanded_signal| to + // Downsamples `input` (`input_length` samples) and `expanded_signal` to // 4 kHz sample rate. The downsampled signals are written to - // |input_downsampled_| and |expanded_downsampled_|, respectively. + // `input_downsampled_` and `expanded_downsampled_`, respectively. void Downsample(const int16_t* input, size_t input_length, const int16_t* expanded_signal, size_t expanded_length); - // Calculates cross-correlation between |input_downsampled_| and - // |expanded_downsampled_|, and finds the correlation maximum. The maximizing + // Calculates cross-correlation between `input_downsampled_` and + // `expanded_downsampled_`, and finds the correlation maximum. The maximizing // lag is returned. size_t CorrelateAndPeakSearch(size_t start_position, size_t input_length, @@ -93,8 +95,6 @@ class Merge { int16_t input_downsampled_[kInputDownsampLength]; AudioMultiVector expanded_; std::vector temp_data_; - - RTC_DISALLOW_COPY_AND_ASSIGN(Merge); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/mock/mock_delay_manager.h b/modules/audio_coding/neteq/mock/mock_delay_manager.h index 5b5133eceb..d783f8743b 100644 --- a/modules/audio_coding/neteq/mock/mock_delay_manager.h +++ b/modules/audio_coding/neteq/mock/mock_delay_manager.h @@ -11,9 +11,6 @@ #ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_MANAGER_H_ #define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_MANAGER_H_ -#include -#include - #include "api/neteq/tick_timer.h" #include "modules/audio_coding/neteq/delay_manager.h" #include "test/gmock.h" @@ -22,20 +19,9 @@ namespace webrtc { class MockDelayManager : public DelayManager { public: - MockDelayManager(size_t max_packets_in_buffer, - int base_minimum_delay_ms, - int histogram_quantile, - absl::optional resample_interval_ms, - int max_history_ms, - const TickTimer* tick_timer, - std::unique_ptr histogram) - : DelayManager(max_packets_in_buffer, - base_minimum_delay_ms, - histogram_quantile, - resample_interval_ms, - max_history_ms, - tick_timer, - std::move(histogram)) {} + MockDelayManager(const MockDelayManager::Config& config, + const TickTimer* tick_timer) + : DelayManager(config, tick_timer) {} MOCK_METHOD(int, TargetDelayMs, (), (const)); }; diff --git a/modules/audio_coding/neteq/nack_tracker.cc b/modules/audio_coding/neteq/nack_tracker.cc index 9a873eee07..35afb736c8 100644 --- a/modules/audio_coding/neteq/nack_tracker.cc +++ b/modules/audio_coding/neteq/nack_tracker.cc @@ -10,39 +10,53 @@ #include "modules/audio_coding/neteq/nack_tracker.h" -#include - #include #include #include "rtc_base/checks.h" +#include "rtc_base/experiments/struct_parameters_parser.h" +#include "rtc_base/logging.h" +#include "system_wrappers/include/field_trial.h" namespace webrtc { namespace { const int kDefaultSampleRateKhz = 48; -const int kDefaultPacketSizeMs = 20; +const int kMaxPacketSizeMs = 120; +constexpr char kNackTrackerConfigFieldTrial[] = + "WebRTC-Audio-NetEqNackTrackerConfig"; } // namespace -NackTracker::NackTracker(int nack_threshold_packets) - : nack_threshold_packets_(nack_threshold_packets), - sequence_num_last_received_rtp_(0), 
+NackTracker::Config::Config() { + auto parser = StructParametersParser::Create( + "packet_loss_forget_factor", &packet_loss_forget_factor, + "ms_per_loss_percent", &ms_per_loss_percent, "never_nack_multiple_times", + &never_nack_multiple_times, "require_valid_rtt", &require_valid_rtt, + "max_loss_rate", &max_loss_rate); + parser->Parse( + webrtc::field_trial::FindFullName(kNackTrackerConfigFieldTrial)); + RTC_LOG(LS_INFO) << "Nack tracker config:" + " packet_loss_forget_factor=" + << packet_loss_forget_factor + << " ms_per_loss_percent=" << ms_per_loss_percent + << " never_nack_multiple_times=" << never_nack_multiple_times + << " require_valid_rtt=" << require_valid_rtt + << " max_loss_rate=" << max_loss_rate; +} + +NackTracker::NackTracker() + : sequence_num_last_received_rtp_(0), timestamp_last_received_rtp_(0), any_rtp_received_(false), sequence_num_last_decoded_rtp_(0), timestamp_last_decoded_rtp_(0), any_rtp_decoded_(false), sample_rate_khz_(kDefaultSampleRateKhz), - samples_per_packet_(sample_rate_khz_ * kDefaultPacketSizeMs), max_nack_list_size_(kNackListSizeLimit) {} NackTracker::~NackTracker() = default; -NackTracker* NackTracker::Create(int nack_threshold_packets) { - return new NackTracker(nack_threshold_packets); -} - void NackTracker::UpdateSampleRate(int sample_rate_hz) { RTC_DCHECK_GT(sample_rate_hz, 0); sample_rate_khz_ = sample_rate_hz / 1000; @@ -75,69 +89,62 @@ void NackTracker::UpdateLastReceivedPacket(uint16_t sequence_number, if (IsNewerSequenceNumber(sequence_num_last_received_rtp_, sequence_number)) return; - UpdateSamplesPerPacket(sequence_number, timestamp); + UpdatePacketLossRate(sequence_number - sequence_num_last_received_rtp_ - 1); - UpdateList(sequence_number); + UpdateList(sequence_number, timestamp); sequence_num_last_received_rtp_ = sequence_number; timestamp_last_received_rtp_ = timestamp; LimitNackListSize(); } -void NackTracker::UpdateSamplesPerPacket( +absl::optional NackTracker::GetSamplesPerPacket( uint16_t sequence_number_current_received_rtp, - uint32_t timestamp_current_received_rtp) { + uint32_t timestamp_current_received_rtp) const { uint32_t timestamp_increase = timestamp_current_received_rtp - timestamp_last_received_rtp_; uint16_t sequence_num_increase = sequence_number_current_received_rtp - sequence_num_last_received_rtp_; - samples_per_packet_ = timestamp_increase / sequence_num_increase; -} - -void NackTracker::UpdateList(uint16_t sequence_number_current_received_rtp) { - // Some of the packets which were considered late, now are considered missing. 
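The Config constructor above pulls overrides from the WebRTC-Audio-NetEqNackTrackerConfig field trial as comma-separated key:value pairs. A minimal stand-in parser for a single key, shown only to document the wire format (std::stod stops at the comma; the real work is done by StructParametersParser):

#include <cstdio>
#include <string>

double ParseForgetFactor(const std::string& trial, double default_value) {
  const std::string key = "packet_loss_forget_factor:";
  const size_t pos = trial.find(key);
  if (pos == std::string::npos) return default_value;  // Keep the default.
  return std::stod(trial.substr(pos + key.size()));
}

int main() {
  // The same key:value list used by the unit tests further down.
  std::printf("%f\n", ParseForgetFactor(
      "packet_loss_forget_factor:0.996,ms_per_loss_percent:20", 0.5));
}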
- ChangeFromLateToMissing(sequence_number_current_received_rtp); - - if (IsNewerSequenceNumber(sequence_number_current_received_rtp, - sequence_num_last_received_rtp_ + 1)) - AddToList(sequence_number_current_received_rtp); -} - -void NackTracker::ChangeFromLateToMissing( - uint16_t sequence_number_current_received_rtp) { - NackList::const_iterator lower_bound = - nack_list_.lower_bound(static_cast( - sequence_number_current_received_rtp - nack_threshold_packets_)); - - for (NackList::iterator it = nack_list_.begin(); it != lower_bound; ++it) - it->second.is_missing = true; -} - -uint32_t NackTracker::EstimateTimestamp(uint16_t sequence_num) { - uint16_t sequence_num_diff = sequence_num - sequence_num_last_received_rtp_; - return sequence_num_diff * samples_per_packet_ + timestamp_last_received_rtp_; + int samples_per_packet = timestamp_increase / sequence_num_increase; + if (samples_per_packet == 0 || + samples_per_packet > kMaxPacketSizeMs * sample_rate_khz_) { + // Not a valid samples per packet. + return absl::nullopt; + } + return samples_per_packet; } -void NackTracker::AddToList(uint16_t sequence_number_current_received_rtp) { +void NackTracker::UpdateList(uint16_t sequence_number_current_received_rtp, + uint32_t timestamp_current_received_rtp) { + if (!IsNewerSequenceNumber(sequence_number_current_received_rtp, + sequence_num_last_received_rtp_ + 1)) { + return; + } RTC_DCHECK(!any_rtp_decoded_ || IsNewerSequenceNumber(sequence_number_current_received_rtp, sequence_num_last_decoded_rtp_)); - // Packets with sequence numbers older than |upper_bound_missing| are - // considered missing, and the rest are considered late. - uint16_t upper_bound_missing = - sequence_number_current_received_rtp - nack_threshold_packets_; + absl::optional samples_per_packet = GetSamplesPerPacket( + sequence_number_current_received_rtp, timestamp_current_received_rtp); + if (!samples_per_packet) { + return; + } for (uint16_t n = sequence_num_last_received_rtp_ + 1; IsNewerSequenceNumber(sequence_number_current_received_rtp, n); ++n) { - bool is_missing = IsNewerSequenceNumber(upper_bound_missing, n); - uint32_t timestamp = EstimateTimestamp(n); - NackElement nack_element(TimeToPlay(timestamp), timestamp, is_missing); + uint32_t timestamp = EstimateTimestamp(n, *samples_per_packet); + NackElement nack_element(TimeToPlay(timestamp), timestamp); nack_list_.insert(nack_list_.end(), std::make_pair(n, nack_element)); } } +uint32_t NackTracker::EstimateTimestamp(uint16_t sequence_num, + int samples_per_packet) { + uint16_t sequence_num_diff = sequence_num - sequence_num_last_received_rtp_; + return sequence_num_diff * samples_per_packet + timestamp_last_received_rtp_; +} + void NackTracker::UpdateEstimatedPlayoutTimeBy10ms() { while (!nack_list_.empty() && nack_list_.begin()->second.time_to_play_ms <= 10) @@ -191,7 +198,6 @@ void NackTracker::Reset() { timestamp_last_decoded_rtp_ = 0; any_rtp_decoded_ = false; sample_rate_khz_ = kDefaultSampleRateKhz; - samples_per_packet_ = sample_rate_khz_ * kDefaultPacketSizeMs; } void NackTracker::SetMaxNackListSize(size_t max_nack_list_size) { @@ -216,17 +222,42 @@ int64_t NackTracker::TimeToPlay(uint32_t timestamp) const { } // We don't erase elements with time-to-play shorter than round-trip-time. 
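GetSamplesPerPacket() above estimates the packet size from the timestamp and sequence-number deltas of the last two received packets, and rejects implausible results. A stand-alone paraphrase (names hypothetical):

#include <cstdint>
#include "absl/types/optional.h"

absl::optional<int> SamplesPerPacket(uint32_t timestamp_increase,
                                     uint16_t sequence_num_increase,
                                     int sample_rate_khz) {
  constexpr int kMaxPacketSizeMs = 120;
  if (sequence_num_increase == 0) return absl::nullopt;
  const int samples =
      static_cast<int>(timestamp_increase / sequence_num_increase);
  if (samples == 0 || samples > kMaxPacketSizeMs * sample_rate_khz) {
    // E.g. a DTX gap: the gap implies an implausibly long packet.
    return absl::nullopt;
  }
  return samples;
}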
-std::vector NackTracker::GetNackList( - int64_t round_trip_time_ms) const { +std::vector NackTracker::GetNackList(int64_t round_trip_time_ms) { RTC_DCHECK_GE(round_trip_time_ms, 0); std::vector sequence_numbers; + if (config_.require_valid_rtt && round_trip_time_ms == 0) { + return sequence_numbers; + } + if (packet_loss_rate_ > + static_cast(config_.max_loss_rate * (1 << 30))) { + return sequence_numbers; + } + // The estimated packet loss is between 0 and 1, so we need to multiply by 100 + // here. + int max_wait_ms = + 100.0 * config_.ms_per_loss_percent * packet_loss_rate_ / (1 << 30); for (NackList::const_iterator it = nack_list_.begin(); it != nack_list_.end(); ++it) { - if (it->second.is_missing && - it->second.time_to_play_ms > round_trip_time_ms) + int64_t time_since_packet_ms = + (timestamp_last_received_rtp_ - it->second.estimated_timestamp) / + sample_rate_khz_; + if (it->second.time_to_play_ms > round_trip_time_ms || + time_since_packet_ms + round_trip_time_ms < max_wait_ms) sequence_numbers.push_back(it->first); } + if (config_.never_nack_multiple_times) { + nack_list_.clear(); + } return sequence_numbers; } +void NackTracker::UpdatePacketLossRate(int packets_lost) { + const uint64_t alpha_q30 = (1 << 30) * config_.packet_loss_forget_factor; + // Exponential filter. + packet_loss_rate_ = (alpha_q30 * packet_loss_rate_) >> 30; + for (int i = 0; i < packets_lost; ++i) { + packet_loss_rate_ = + ((alpha_q30 * packet_loss_rate_) >> 30) + ((1 << 30) - alpha_q30); + } +} } // namespace webrtc diff --git a/modules/audio_coding/neteq/nack_tracker.h b/modules/audio_coding/neteq/nack_tracker.h index 5a567349fe..0cc95b0882 100644 --- a/modules/audio_coding/neteq/nack_tracker.h +++ b/modules/audio_coding/neteq/nack_tracker.h @@ -17,6 +17,7 @@ #include #include +#include "absl/types/optional.h" #include "modules/include/module_common_types_public.h" #include "rtc_base/gtest_prod_util.h" @@ -30,12 +31,9 @@ // Every time 10ms audio is pulled from NetEq LastDecodedPacket() should be // called, and time-to-play is updated at that moment. // -// If packet N is received, any packet prior to |N - NackThreshold| which is not -// arrived is considered lost, and should be labeled as "missing" (the size of -// the list might be limited and older packet eliminated from the list). Packets -// |N - NackThreshold|, |N - NackThreshold + 1|, ..., |N - 1| are considered -// "late." A "late" packet with sequence number K is changed to "missing" any -// time a packet with sequence number newer than |K + NackList| is arrived. +// If packet N is received, any packet prior to N which has not arrived is +// considered lost, and should be labeled as "missing" (the size of +// the list might be limited and older packet eliminated from the list). // // The NackTracker class has to know about the sample rate of the packets to // compute time-to-play. So sample rate should be set as soon as the first @@ -56,16 +54,14 @@ class NackTracker { // A limit for the size of the NACK list. static const size_t kNackListSizeLimit = 500; // 10 seconds for 20 ms frame // packets. - // Factory method. - static NackTracker* Create(int nack_threshold_packets); - + NackTracker(); ~NackTracker(); // Set a maximum for the size of the NACK list. If the last received packet // has sequence number of N, then NACK list will not contain any element - // with sequence number earlier than N - |max_nack_list_size|. + // with sequence number earlier than N - `max_nack_list_size`. 
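UpdatePacketLossRate() above is an exponential filter kept in Q30: every update decays the rate by alpha, and each lost packet additionally mixes in (1 - alpha). Plain-arithmetic sketch:

#include <cstdint>

uint32_t UpdateLossRateQ30(uint32_t rate_q30, int packets_lost, double alpha) {
  const uint64_t alpha_q30 = static_cast<uint64_t>((1 << 30) * alpha);
  rate_q30 = static_cast<uint32_t>((alpha_q30 * rate_q30) >> 30);  // Decay.
  for (int i = 0; i < packets_lost; ++i) {
    rate_q30 = static_cast<uint32_t>(
        ((alpha_q30 * rate_q30) >> 30) + ((1 << 30) - alpha_q30));  // Mix in a loss.
  }
  return rate_q30;
}

With the default alpha = 0.996, the one-in-four loss pattern used by PacketLossRateCorrect further down settles near 1 << 28 (0.25). GetNackList() above then re-NACKs a packet only while time_since_packet_ms + round_trip_time_ms < 100 * ms_per_loss_percent * loss_rate; at 5% loss with the default ms_per_loss_percent = 20 that window is 100 * 20 * 0.05 = 100 ms.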
// - // The largest maximum size is defined by |kNackListSizeLimit| + // The largest maximum size is defined by `kNackListSizeLimit` void SetMaxNackListSize(size_t max_nack_list_size); // Set the sampling rate. @@ -87,39 +83,54 @@ class NackTracker { // Get a list of "missing" packets which have expected time-to-play larger // than the given round-trip-time (in milliseconds). // Note: Late packets are not included. - std::vector GetNackList(int64_t round_trip_time_ms) const; + // Calling this method multiple times may give different results, since the + // internal nack list may get flushed if never_nack_multiple_times_ is true. + std::vector GetNackList(int64_t round_trip_time_ms); // Reset to default values. The NACK list is cleared. - // |nack_threshold_packets_| & |max_nack_list_size_| preserve their values. + // `max_nack_list_size_` preserves its value. void Reset(); + // Returns the estimated packet loss rate in Q30, for testing only. + uint32_t GetPacketLossRateForTest() { return packet_loss_rate_; } + private: // This test need to access the private method GetNackList(). FRIEND_TEST_ALL_PREFIXES(NackTrackerTest, EstimateTimestampAndTimeToPlay); + // Options that can be configured via field trial. + struct Config { + Config(); + + // The exponential decay factor used to estimate the packet loss rate. + double packet_loss_forget_factor = 0.996; + // How many additional ms we are willing to wait (at most) for nacked + // packets for each additional percentage of packet loss. + int ms_per_loss_percent = 20; + // If true, never nack packets more than once. + bool never_nack_multiple_times = false; + // Only nack if the RTT is valid. + bool require_valid_rtt = false; + // Do not nack if the loss rate is above this value. + double max_loss_rate = 1.0; + }; + struct NackElement { - NackElement(int64_t initial_time_to_play_ms, - uint32_t initial_timestamp, - bool missing) + NackElement(int64_t initial_time_to_play_ms, uint32_t initial_timestamp) : time_to_play_ms(initial_time_to_play_ms), - estimated_timestamp(initial_timestamp), - is_missing(missing) {} + estimated_timestamp(initial_timestamp) {} // Estimated time (ms) left for this packet to be decoded. This estimate is // updated every time jitter buffer decodes a packet. int64_t time_to_play_ms; // A guess about the timestamp of the missing packet, it is used for - // estimation of |time_to_play_ms|. The estimate might be slightly wrong if + // estimation of `time_to_play_ms`. The estimate might be slightly wrong if // there has been frame-size change since the last received packet and the // missing packet. However, the risk of this is low, and in case of such // errors, there will be a minor misestimation in time-to-play of missing // packets. This will have a very minor effect on NACK performance. uint32_t estimated_timestamp; - - // True if the packet is considered missing. Otherwise indicates packet is - // late. - bool is_missing; }; class NackListCompare { @@ -132,53 +143,41 @@ class NackTracker { typedef std::map NackList; - // Constructor. - explicit NackTracker(int nack_threshold_packets); - // This API is used only for testing to assess whether time-to-play is // computed correctly. NackList GetNackList() const; - // Given the |sequence_number_current_received_rtp| of currently received RTP, - // recognize packets which are not arrive and add to the list. - void AddToList(uint16_t sequence_number_current_received_rtp); - // This function subtracts 10 ms of time-to-play for all packets in NACK list. 
// This is called when 10 ms elapsed with no new RTP packet decoded. void UpdateEstimatedPlayoutTimeBy10ms(); - // Given the |sequence_number_current_received_rtp| and - // |timestamp_current_received_rtp| of currently received RTP update number - // of samples per packet. - void UpdateSamplesPerPacket(uint16_t sequence_number_current_received_rtp, - uint32_t timestamp_current_received_rtp); - - // Given the |sequence_number_current_received_rtp| of currently received RTP - // update the list. That is; some packets will change from late to missing, - // some packets are inserted as missing and some inserted as late. - void UpdateList(uint16_t sequence_number_current_received_rtp); + // Returns a valid number of samples per packet given the current received + // sequence number and timestamp or nullopt of none could be computed. + absl::optional GetSamplesPerPacket( + uint16_t sequence_number_current_received_rtp, + uint32_t timestamp_current_received_rtp) const; - // Packets which are considered late for too long (according to - // |nack_threshold_packets_|) are flagged as missing. - void ChangeFromLateToMissing(uint16_t sequence_number_current_received_rtp); + // Given the `sequence_number_current_received_rtp` of currently received RTP + // update the list. Packets that are older than the received packet are added + // to the nack list. + void UpdateList(uint16_t sequence_number_current_received_rtp, + uint32_t timestamp_current_received_rtp); // Packets which have sequence number older that - // |sequence_num_last_received_rtp_| - |max_nack_list_size_| are removed + // `sequence_num_last_received_rtp_` - `max_nack_list_size_` are removed // from the NACK list. void LimitNackListSize(); // Estimate timestamp of a missing packet given its sequence number. - uint32_t EstimateTimestamp(uint16_t sequence_number); + uint32_t EstimateTimestamp(uint16_t sequence_number, int samples_per_packet); // Compute time-to-play given a timestamp. int64_t TimeToPlay(uint32_t timestamp) const; - // If packet N is arrived, any packet prior to N - |nack_threshold_packets_| - // which is not arrived is considered missing, and should be in NACK list. - // Also any packet in the range of N-1 and N - |nack_threshold_packets_|, - // exclusive, which is not arrived is considered late, and should should be - // in the list of late packets. - const int nack_threshold_packets_; + // Updates the estimated packet lost rate. + void UpdatePacketLossRate(int packets_lost); + + const Config config_; // Valid if a packet is received. uint16_t sequence_num_last_received_rtp_; @@ -192,18 +191,17 @@ class NackTracker { int sample_rate_khz_; // Sample rate in kHz. - // Number of samples per packet. We update this every time we receive a - // packet, not only for consecutive packets. - int samples_per_packet_; - // A list of missing packets to be retransmitted. Components of the list // contain the sequence number of missing packets and the estimated time that // each pack is going to be played out. NackList nack_list_; // NACK list will not keep track of missing packets prior to - // |sequence_num_last_received_rtp_| - |max_nack_list_size_|. + // `sequence_num_last_received_rtp_` - `max_nack_list_size_`. size_t max_nack_list_size_; + + // Current estimate of the packet loss rate in Q30. 
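The packet_loss_rate_ member declared just below is a fixed-point fraction in Q30, i.e. a probability p in [0, 1] stored as p * 2^30. For reference (helper names are mine):

#include <cstdint>

constexpr uint32_t ToQ30(double p) {
  return static_cast<uint32_t>(p * (1 << 30));
}
constexpr double FromQ30(uint32_t q) {
  return static_cast<double>(q) / (1 << 30);
}
static_assert(ToQ30(0.25) == (1u << 28), "0.25 in Q30 is 1 << 28");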
+ uint32_t packet_loss_rate_ = 0; }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/nack_tracker_unittest.cc b/modules/audio_coding/neteq/nack_tracker_unittest.cc index a44f41ba41..bcc5120ff3 100644 --- a/modules/audio_coding/neteq/nack_tracker_unittest.cc +++ b/modules/audio_coding/neteq/nack_tracker_unittest.cc @@ -16,12 +16,12 @@ #include #include "modules/audio_coding/include/audio_coding_module_typedefs.h" +#include "test/field_trial.h" #include "test/gtest.h" namespace webrtc { namespace { -const int kNackThreshold = 3; const int kSampleRateHz = 16000; const int kPacketSizeMs = 30; const uint32_t kTimestampIncrement = 480; // 30 ms. @@ -54,55 +54,31 @@ bool IsNackListCorrect(const std::vector& nack_list, } // namespace TEST(NackTrackerTest, EmptyListWhenNoPacketLoss) { - std::unique_ptr nack(NackTracker::Create(kNackThreshold)); - nack->UpdateSampleRate(kSampleRateHz); + NackTracker nack; + nack.UpdateSampleRate(kSampleRateHz); int seq_num = 1; uint32_t timestamp = 0; std::vector nack_list; for (int n = 0; n < 100; n++) { - nack->UpdateLastReceivedPacket(seq_num, timestamp); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.UpdateLastReceivedPacket(seq_num, timestamp); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); seq_num++; timestamp += kTimestampIncrement; - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(nack_list.empty()); } } -TEST(NackTrackerTest, NoNackIfReorderWithinNackThreshold) { - std::unique_ptr nack(NackTracker::Create(kNackThreshold)); - nack->UpdateSampleRate(kSampleRateHz); - - int seq_num = 1; - uint32_t timestamp = 0; - std::vector nack_list; - - nack->UpdateLastReceivedPacket(seq_num, timestamp); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); - EXPECT_TRUE(nack_list.empty()); - int num_late_packets = kNackThreshold + 1; - - // Push in reverse order - while (num_late_packets > 0) { - nack->UpdateLastReceivedPacket( - seq_num + num_late_packets, - timestamp + num_late_packets * kTimestampIncrement); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); - EXPECT_TRUE(nack_list.empty()); - num_late_packets--; - } -} - TEST(NackTrackerTest, LatePacketsMovedToNackThenNackListDoesNotChange) { const uint16_t kSequenceNumberLostPackets[] = {2, 3, 4, 5, 6, 7, 8, 9}; static const int kNumAllLostPackets = sizeof(kSequenceNumberLostPackets) / sizeof(kSequenceNumberLostPackets[0]); for (int k = 0; k < 2; k++) { // Two iteration with/without wrap around. 
- std::unique_ptr nack(NackTracker::Create(kNackThreshold)); - nack->UpdateSampleRate(kSampleRateHz); + NackTracker nack; + nack.UpdateSampleRate(kSampleRateHz); uint16_t sequence_num_lost_packets[kNumAllLostPackets]; for (int n = 0; n < kNumAllLostPackets; n++) { @@ -115,27 +91,25 @@ TEST(NackTrackerTest, LatePacketsMovedToNackThenNackListDoesNotChange) { uint32_t timestamp = 0; std::vector nack_list; - nack->UpdateLastReceivedPacket(seq_num, timestamp); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.UpdateLastReceivedPacket(seq_num, timestamp); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(nack_list.empty()); seq_num = sequence_num_lost_packets[kNumAllLostPackets - 1] + 1; timestamp += kTimestampIncrement * (kNumAllLostPackets + 1); - int num_lost_packets = std::max(0, kNumAllLostPackets - kNackThreshold); + int num_lost_packets = std::max(0, kNumAllLostPackets); - for (int n = 0; n < kNackThreshold + 1; ++n) { - nack->UpdateLastReceivedPacket(seq_num, timestamp); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); - EXPECT_TRUE(IsNackListCorrect(nack_list, sequence_num_lost_packets, - num_lost_packets)); - seq_num++; - timestamp += kTimestampIncrement; - num_lost_packets++; - } + nack.UpdateLastReceivedPacket(seq_num, timestamp); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); + EXPECT_TRUE(IsNackListCorrect(nack_list, sequence_num_lost_packets, + num_lost_packets)); + seq_num++; + timestamp += kTimestampIncrement; + num_lost_packets++; for (int n = 0; n < 100; ++n) { - nack->UpdateLastReceivedPacket(seq_num, timestamp); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.UpdateLastReceivedPacket(seq_num, timestamp); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(IsNackListCorrect(nack_list, sequence_num_lost_packets, kNumAllLostPackets)); seq_num++; @@ -150,8 +124,8 @@ TEST(NackTrackerTest, ArrivedPacketsAreRemovedFromNackList) { sizeof(kSequenceNumberLostPackets[0]); for (int k = 0; k < 2; ++k) { // Two iteration with/without wrap around. - std::unique_ptr nack(NackTracker::Create(kNackThreshold)); - nack->UpdateSampleRate(kSampleRateHz); + NackTracker nack; + nack.UpdateSampleRate(kSampleRateHz); uint16_t sequence_num_lost_packets[kNumAllLostPackets]; for (int n = 0; n < kNumAllLostPackets; ++n) { @@ -162,8 +136,8 @@ TEST(NackTrackerTest, ArrivedPacketsAreRemovedFromNackList) { uint16_t seq_num = sequence_num_lost_packets[0] - 1; uint32_t timestamp = 0; - nack->UpdateLastReceivedPacket(seq_num, timestamp); - std::vector nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.UpdateLastReceivedPacket(seq_num, timestamp); + std::vector nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(nack_list.empty()); size_t index_retransmitted_rtp = 0; @@ -171,16 +145,16 @@ TEST(NackTrackerTest, ArrivedPacketsAreRemovedFromNackList) { seq_num = sequence_num_lost_packets[kNumAllLostPackets - 1] + 1; timestamp += kTimestampIncrement * (kNumAllLostPackets + 1); - size_t num_lost_packets = std::max(0, kNumAllLostPackets - kNackThreshold); + size_t num_lost_packets = kNumAllLostPackets; for (int n = 0; n < kNumAllLostPackets; ++n) { // Number of lost packets does not change for the first // |kNackThreshold + 1| packets, one is added to the list and one is // removed. Thereafter, the list shrinks every iteration. 
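The "with/without wrap around" iterations in these tests lean on IsNewerSequenceNumber from modules/include/module_common_types_public.h. A simplified version of its semantics (the real helper additionally breaks the tie when the two values are exactly 0x8000 apart):

#include <cstdint>

bool IsNewerSeq(uint16_t value, uint16_t prev_value) {
  // A forward distance of less than 2^15 counts as "newer", so 2 is newer
  // than 65531 after a 16-bit wrap.
  return value != prev_value &&
         static_cast<uint16_t>(value - prev_value) < 0x8000;
}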
- if (n >= kNackThreshold + 1) + if (n >= 1) num_lost_packets--; - nack->UpdateLastReceivedPacket(seq_num, timestamp); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.UpdateLastReceivedPacket(seq_num, timestamp); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(IsNackListCorrect( nack_list, &sequence_num_lost_packets[index_retransmitted_rtp], num_lost_packets)); @@ -188,13 +162,13 @@ TEST(NackTrackerTest, ArrivedPacketsAreRemovedFromNackList) { timestamp += kTimestampIncrement; // Retransmission of a lost RTP. - nack->UpdateLastReceivedPacket( + nack.UpdateLastReceivedPacket( sequence_num_lost_packets[index_retransmitted_rtp], timestamp_retransmitted_rtp); index_retransmitted_rtp++; timestamp_retransmitted_rtp += kTimestampIncrement; - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(IsNackListCorrect( nack_list, &sequence_num_lost_packets[index_retransmitted_rtp], num_lost_packets - 1)); // One less lost packet in the list. @@ -212,13 +186,13 @@ TEST(NackTrackerTest, EstimateTimestampAndTimeToPlay) { sizeof(kLostPackets) / sizeof(kLostPackets[0]); for (int k = 0; k < 4; ++k) { - std::unique_ptr nack(NackTracker::Create(kNackThreshold)); - nack->UpdateSampleRate(kSampleRateHz); + NackTracker nack; + nack.UpdateSampleRate(kSampleRateHz); - // Sequence number wrap around if |k| is 2 or 3; + // Sequence number wrap around if `k` is 2 or 3; int seq_num_offset = (k < 2) ? 0 : 65531; - // Timestamp wrap around if |k| is 1 or 3. + // Timestamp wrap around if `k` is 1 or 3. uint32_t timestamp_offset = (k & 0x1) ? static_cast(0xffffffff) - 6 : 0; @@ -238,23 +212,23 @@ TEST(NackTrackerTest, EstimateTimestampAndTimeToPlay) { const uint32_t first_timestamp = timestamp; // Two consecutive packets to have a correct estimate of timestamp increase. - nack->UpdateLastReceivedPacket(seq_num, timestamp); + nack.UpdateLastReceivedPacket(seq_num, timestamp); seq_num++; timestamp += kTimestampIncrement; - nack->UpdateLastReceivedPacket(seq_num, timestamp); + nack.UpdateLastReceivedPacket(seq_num, timestamp); // A packet after the last one which is supposed to be lost. seq_num = seq_num_lost_packets[kNumAllLostPackets - 1] + 1; timestamp = timestamp_lost_packets[kNumAllLostPackets - 1] + kTimestampIncrement; - nack->UpdateLastReceivedPacket(seq_num, timestamp); + nack.UpdateLastReceivedPacket(seq_num, timestamp); - NackTracker::NackList nack_list = nack->GetNackList(); + NackTracker::NackList nack_list = nack.GetNackList(); EXPECT_EQ(static_cast(kNumAllLostPackets), nack_list.size()); // Pretend the first packet is decoded. - nack->UpdateLastDecodedPacket(first_seq_num, first_timestamp); - nack_list = nack->GetNackList(); + nack.UpdateLastDecodedPacket(first_seq_num, first_timestamp); + nack_list = nack.GetNackList(); NackTracker::NackList::iterator it = nack_list.begin(); while (it != nack_list.end()) { @@ -268,8 +242,8 @@ TEST(NackTrackerTest, EstimateTimestampAndTimeToPlay) { // Pretend 10 ms is passed, and we had pulled audio from NetEq, it still // reports the same sequence number as decoded, time-to-play should be // updated by 10 ms. 
- nack->UpdateLastDecodedPacket(first_seq_num, first_timestamp); - nack_list = nack->GetNackList(); + nack.UpdateLastDecodedPacket(first_seq_num, first_timestamp); + nack_list = nack.GetNackList(); it = nack_list.begin(); while (it != nack_list.end()) { seq_num = it->first - seq_num_offset; @@ -283,118 +257,118 @@ TEST(NackTrackerTest, EstimateTimestampAndTimeToPlay) { TEST(NackTrackerTest, MissingPacketsPriorToLastDecodedRtpShouldNotBeInNackList) { for (int m = 0; m < 2; ++m) { - uint16_t seq_num_offset = (m == 0) ? 0 : 65531; // Wrap around if |m| is 1. - std::unique_ptr nack(NackTracker::Create(kNackThreshold)); - nack->UpdateSampleRate(kSampleRateHz); + uint16_t seq_num_offset = (m == 0) ? 0 : 65531; // Wrap around if `m` is 1. + NackTracker nack; + nack.UpdateSampleRate(kSampleRateHz); // Two consecutive packets to have a correct estimate of timestamp increase. uint16_t seq_num = 0; - nack->UpdateLastReceivedPacket(seq_num_offset + seq_num, - seq_num * kTimestampIncrement); + nack.UpdateLastReceivedPacket(seq_num_offset + seq_num, + seq_num * kTimestampIncrement); seq_num++; - nack->UpdateLastReceivedPacket(seq_num_offset + seq_num, - seq_num * kTimestampIncrement); + nack.UpdateLastReceivedPacket(seq_num_offset + seq_num, + seq_num * kTimestampIncrement); // Skip 10 packets (larger than NACK threshold). const int kNumLostPackets = 10; seq_num += kNumLostPackets + 1; - nack->UpdateLastReceivedPacket(seq_num_offset + seq_num, - seq_num * kTimestampIncrement); + nack.UpdateLastReceivedPacket(seq_num_offset + seq_num, + seq_num * kTimestampIncrement); - const size_t kExpectedListSize = kNumLostPackets - kNackThreshold; - std::vector nack_list = nack->GetNackList(kShortRoundTripTimeMs); + const size_t kExpectedListSize = kNumLostPackets; + std::vector nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_EQ(kExpectedListSize, nack_list.size()); for (int k = 0; k < 2; ++k) { // Decoding of the first and the second arrived packets. for (int n = 0; n < kPacketSizeMs / 10; ++n) { - nack->UpdateLastDecodedPacket(seq_num_offset + k, - k * kTimestampIncrement); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.UpdateLastDecodedPacket(seq_num_offset + k, + k * kTimestampIncrement); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_EQ(kExpectedListSize, nack_list.size()); } } // Decoding of the last received packet. - nack->UpdateLastDecodedPacket(seq_num + seq_num_offset, - seq_num * kTimestampIncrement); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.UpdateLastDecodedPacket(seq_num + seq_num_offset, + seq_num * kTimestampIncrement); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(nack_list.empty()); // Make sure list of late packets is also empty. To check that, push few // packets, if the late list is not empty its content will pop up in NACK // list. 
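The time-to-play bookkeeping these assertions verify derives a missing packet's playout deadline from its estimated timestamp and the last decoded timestamp. Essentially (a simplified stand-in for NackTracker::TimeToPlay()):

#include <cstdint>

int64_t TimeToPlayMs(uint32_t estimated_timestamp,
                     uint32_t last_decoded_timestamp,
                     int sample_rate_khz) {
  // Samples still ahead of the playout cursor, converted to milliseconds.
  const uint32_t samples_ahead = estimated_timestamp - last_decoded_timestamp;
  return samples_ahead / sample_rate_khz;
}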
- for (int n = 0; n < kNackThreshold + 10; ++n) { + for (int n = 0; n < 10; ++n) { seq_num++; - nack->UpdateLastReceivedPacket(seq_num_offset + seq_num, - seq_num * kTimestampIncrement); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.UpdateLastReceivedPacket(seq_num_offset + seq_num, + seq_num * kTimestampIncrement); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(nack_list.empty()); } } } TEST(NackTrackerTest, Reset) { - std::unique_ptr nack(NackTracker::Create(kNackThreshold)); - nack->UpdateSampleRate(kSampleRateHz); + NackTracker nack; + nack.UpdateSampleRate(kSampleRateHz); // Two consecutive packets to have a correct estimate of timestamp increase. uint16_t seq_num = 0; - nack->UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement); + nack.UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement); seq_num++; - nack->UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement); + nack.UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement); // Skip 10 packets (larger than NACK threshold). const int kNumLostPackets = 10; seq_num += kNumLostPackets + 1; - nack->UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement); + nack.UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement); - const size_t kExpectedListSize = kNumLostPackets - kNackThreshold; - std::vector nack_list = nack->GetNackList(kShortRoundTripTimeMs); + const size_t kExpectedListSize = kNumLostPackets; + std::vector nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_EQ(kExpectedListSize, nack_list.size()); - nack->Reset(); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.Reset(); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(nack_list.empty()); } TEST(NackTrackerTest, ListSizeAppliedFromBeginning) { const size_t kNackListSize = 10; for (int m = 0; m < 2; ++m) { - uint16_t seq_num_offset = (m == 0) ? 0 : 65525; // Wrap around if |m| is 1. - std::unique_ptr nack(NackTracker::Create(kNackThreshold)); - nack->UpdateSampleRate(kSampleRateHz); - nack->SetMaxNackListSize(kNackListSize); + uint16_t seq_num_offset = (m == 0) ? 0 : 65525; // Wrap around if `m` is 1. + NackTracker nack; + nack.UpdateSampleRate(kSampleRateHz); + nack.SetMaxNackListSize(kNackListSize); uint16_t seq_num = seq_num_offset; uint32_t timestamp = 0x12345678; - nack->UpdateLastReceivedPacket(seq_num, timestamp); + nack.UpdateLastReceivedPacket(seq_num, timestamp); // Packet lost more than NACK-list size limit. - uint16_t num_lost_packets = kNackThreshold + kNackListSize + 5; + uint16_t num_lost_packets = kNackListSize + 5; seq_num += num_lost_packets + 1; timestamp += (num_lost_packets + 1) * kTimestampIncrement; - nack->UpdateLastReceivedPacket(seq_num, timestamp); + nack.UpdateLastReceivedPacket(seq_num, timestamp); - std::vector nack_list = nack->GetNackList(kShortRoundTripTimeMs); - EXPECT_EQ(kNackListSize - kNackThreshold, nack_list.size()); + std::vector nack_list = nack.GetNackList(kShortRoundTripTimeMs); + EXPECT_EQ(kNackListSize, nack_list.size()); } } TEST(NackTrackerTest, ChangeOfListSizeAppliedAndOldElementsRemoved) { const size_t kNackListSize = 10; for (int m = 0; m < 2; ++m) { - uint16_t seq_num_offset = (m == 0) ? 0 : 65525; // Wrap around if |m| is 1. - std::unique_ptr nack(NackTracker::Create(kNackThreshold)); - nack->UpdateSampleRate(kSampleRateHz); + uint16_t seq_num_offset = (m == 0) ? 0 : 65525; // Wrap around if `m` is 1. 
+ NackTracker nack; + nack.UpdateSampleRate(kSampleRateHz); uint16_t seq_num = seq_num_offset; uint32_t timestamp = 0x87654321; - nack->UpdateLastReceivedPacket(seq_num, timestamp); + nack.UpdateLastReceivedPacket(seq_num, timestamp); // Packet lost more than NACK-list size limit. - uint16_t num_lost_packets = kNackThreshold + kNackListSize + 5; + uint16_t num_lost_packets = kNackListSize + 5; std::unique_ptr seq_num_lost(new uint16_t[num_lost_packets]); for (int n = 0; n < num_lost_packets; ++n) { @@ -403,39 +377,26 @@ TEST(NackTrackerTest, ChangeOfListSizeAppliedAndOldElementsRemoved) { ++seq_num; timestamp += (num_lost_packets + 1) * kTimestampIncrement; - nack->UpdateLastReceivedPacket(seq_num, timestamp); - size_t expected_size = num_lost_packets - kNackThreshold; + nack.UpdateLastReceivedPacket(seq_num, timestamp); + size_t expected_size = num_lost_packets; - std::vector nack_list = nack->GetNackList(kShortRoundTripTimeMs); + std::vector nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_EQ(expected_size, nack_list.size()); - nack->SetMaxNackListSize(kNackListSize); - expected_size = kNackListSize - kNackThreshold; - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.SetMaxNackListSize(kNackListSize); + expected_size = kNackListSize; + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(IsNackListCorrect( nack_list, &seq_num_lost[num_lost_packets - kNackListSize], expected_size)); - // NACK list does not change size but the content is changing. The oldest - // element is removed and one from late list is inserted. - size_t n; - for (n = 1; n <= static_cast(kNackThreshold); ++n) { - ++seq_num; - timestamp += kTimestampIncrement; - nack->UpdateLastReceivedPacket(seq_num, timestamp); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); - EXPECT_TRUE(IsNackListCorrect( - nack_list, &seq_num_lost[num_lost_packets - kNackListSize + n], - expected_size)); - } - // NACK list should shrink. - for (; n < kNackListSize; ++n) { + for (size_t n = 1; n < kNackListSize; ++n) { ++seq_num; timestamp += kTimestampIncrement; - nack->UpdateLastReceivedPacket(seq_num, timestamp); + nack.UpdateLastReceivedPacket(seq_num, timestamp); --expected_size; - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(IsNackListCorrect( nack_list, &seq_num_lost[num_lost_packets - kNackListSize + n], expected_size)); @@ -444,28 +405,28 @@ TEST(NackTrackerTest, ChangeOfListSizeAppliedAndOldElementsRemoved) { // After this packet, NACK list should be empty. ++seq_num; timestamp += kTimestampIncrement; - nack->UpdateLastReceivedPacket(seq_num, timestamp); - nack_list = nack->GetNackList(kShortRoundTripTimeMs); + nack.UpdateLastReceivedPacket(seq_num, timestamp); + nack_list = nack.GetNackList(kShortRoundTripTimeMs); EXPECT_TRUE(nack_list.empty()); } } TEST(NackTrackerTest, RoudTripTimeIsApplied) { const int kNackListSize = 200; - std::unique_ptr nack(NackTracker::Create(kNackThreshold)); - nack->UpdateSampleRate(kSampleRateHz); - nack->SetMaxNackListSize(kNackListSize); + NackTracker nack; + nack.UpdateSampleRate(kSampleRateHz); + nack.SetMaxNackListSize(kNackListSize); uint16_t seq_num = 0; uint32_t timestamp = 0x87654321; - nack->UpdateLastReceivedPacket(seq_num, timestamp); + nack.UpdateLastReceivedPacket(seq_num, timestamp); // Packet lost more than NACK-list size limit. 
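The list-size limit exercised here drops entries older than (last received sequence number - max_nack_list_size). A simplified sketch that ignores wraparound, which the real NackList handles with a custom IsNewerSequenceNumber-based comparator:

#include <cstddef>
#include <cstdint>
#include <map>

void LimitListSize(std::map<uint16_t, int64_t>* nack_list,
                   uint16_t last_received_seq_num,
                   size_t max_size) {
  const uint16_t limit =
      last_received_seq_num - static_cast<uint16_t>(max_size);
  // Erase everything ordered before the cutoff sequence number.
  nack_list->erase(nack_list->begin(), nack_list->lower_bound(limit));
}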
-  uint16_t kNumLostPackets = kNackThreshold + 5;
+  uint16_t kNumLostPackets = 5;
   seq_num += (1 + kNumLostPackets);
   timestamp += (1 + kNumLostPackets) * kTimestampIncrement;
-  nack->UpdateLastReceivedPacket(seq_num, timestamp);
+  nack.UpdateLastReceivedPacket(seq_num, timestamp);
   // Expected time-to-play are:
   // kPacketSizeMs - 10, 2*kPacketSizeMs - 10, 3*kPacketSizeMs - 10, ...
@@ -473,10 +434,132 @@
   // sequence number: 1, 2, 3, 4, 5
   // time-to-play: 20, 50, 80, 110, 140
   //
-  std::vector<uint16_t> nack_list = nack->GetNackList(100);
+  std::vector<uint16_t> nack_list = nack.GetNackList(100);
   ASSERT_EQ(2u, nack_list.size());
   EXPECT_EQ(4, nack_list[0]);
   EXPECT_EQ(5, nack_list[1]);
 }
+// Set never_nack_multiple_times to true with a field trial and verify that
+// packets are not nacked multiple times.
+TEST(NackTrackerTest, DoNotNackMultipleTimes) {
+  test::ScopedFieldTrials field_trials(
+      "WebRTC-Audio-NetEqNackTrackerConfig/"
+      "packet_loss_forget_factor:0.996,ms_per_loss_percent:20,"
+      "never_nack_multiple_times:true/");
+  const int kNackListSize = 200;
+  NackTracker nack;
+  nack.UpdateSampleRate(kSampleRateHz);
+  nack.SetMaxNackListSize(kNackListSize);
+
+  uint16_t seq_num = 0;
+  uint32_t timestamp = 0x87654321;
+  nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+  uint16_t kNumLostPackets = 3;
+
+  seq_num += (1 + kNumLostPackets);
+  timestamp += (1 + kNumLostPackets) * kTimestampIncrement;
+  nack.UpdateLastReceivedPacket(seq_num, timestamp);
+
+  std::vector<uint16_t> nack_list = nack.GetNackList(10);
+  ASSERT_EQ(3u, nack_list.size());
+  EXPECT_EQ(1, nack_list[0]);
+  EXPECT_EQ(2, nack_list[1]);
+  EXPECT_EQ(3, nack_list[2]);
+  // When we get the nack list again, it should be empty.
+  std::vector<uint16_t> nack_list2 = nack.GetNackList(10);
+  EXPECT_TRUE(nack_list2.empty());
+}
+
+// Test if estimated packet loss rate is correct.
+TEST(NackTrackerTest, PacketLossRateCorrect) {
+  const int kNackListSize = 200;
+  NackTracker nack;
+  nack.UpdateSampleRate(kSampleRateHz);
+  nack.SetMaxNackListSize(kNackListSize);
+  uint16_t seq_num = 0;
+  uint32_t timestamp = 0x87654321;
+  auto add_packet = [&nack, &seq_num, &timestamp](bool received) {
+    if (received) {
+      nack.UpdateLastReceivedPacket(seq_num, timestamp);
+    }
+    seq_num++;
+    timestamp += kTimestampIncrement;
+  };
+  // Add some packets, but every fourth packet is lost.
+  for (int i = 0; i < 300; i++) {
+    add_packet(true);
+    add_packet(true);
+    add_packet(true);
+    add_packet(false);
+  }
+  // 1 << 28 is 0.25 in Q30. We expect the packet loss estimate to be within
+  // 0.01 of that.
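(Worked numbers for the assertion that follows: losing one packet in every four gives a loss rate of 0.25; in Q30 that is 0.25 * 2^30 = 1 << 28 = 268435456, and the tolerance (1 << 30) / 100 corresponds to plus or minus 0.01.)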
+  EXPECT_NEAR(nack.GetPacketLossRateForTest(), 1 << 28, (1 << 30) / 100);
+}
+
+TEST(NackTrackerTest, DoNotNackAfterDtx) {
+  const int kNackListSize = 200;
+  NackTracker nack;
+  nack.UpdateSampleRate(kSampleRateHz);
+  nack.SetMaxNackListSize(kNackListSize);
+  uint16_t seq_num = 0;
+  uint32_t timestamp = 0x87654321;
+  nack.UpdateLastReceivedPacket(seq_num, timestamp);
+  EXPECT_TRUE(nack.GetNackList(0).empty());
+  constexpr int kDtxPeriod = 400;
+  nack.UpdateLastReceivedPacket(seq_num + 2,
+                                timestamp + kDtxPeriod * kSampleRateHz / 1000);
+  EXPECT_TRUE(nack.GetNackList(0).empty());
+}
+
+TEST(NackTrackerTest, DoNotNackIfLossRateIsTooHigh) {
+  test::ScopedFieldTrials field_trials(
+      "WebRTC-Audio-NetEqNackTrackerConfig/max_loss_rate:0.4/");
+  const int kNackListSize = 200;
+  NackTracker nack;
+  nack.UpdateSampleRate(kSampleRateHz);
+  nack.SetMaxNackListSize(kNackListSize);
+  uint16_t seq_num = 0;
+  uint32_t timestamp = 0x87654321;
+  auto add_packet = [&nack, &seq_num, &timestamp](bool received) {
+    if (received) {
+      nack.UpdateLastReceivedPacket(seq_num, timestamp);
+    }
+    seq_num++;
+    timestamp += kTimestampIncrement;
+  };
+  for (int i = 0; i < 500; i++) {
+    add_packet(true);
+    add_packet(false);
+  }
+  // Expect 50% loss rate which is higher than the configured maximum 40%.
+  EXPECT_NEAR(nack.GetPacketLossRateForTest(), 1 << 29, (1 << 30) / 100);
+  EXPECT_TRUE(nack.GetNackList(0).empty());
+}
+
+TEST(NackTrackerTest, OnlyNackIfRttIsValid) {
+  test::ScopedFieldTrials field_trials(
+      "WebRTC-Audio-NetEqNackTrackerConfig/require_valid_rtt:true/");
+  const int kNackListSize = 200;
+  NackTracker nack;
+  nack.UpdateSampleRate(kSampleRateHz);
+  nack.SetMaxNackListSize(kNackListSize);
+  uint16_t seq_num = 0;
+  uint32_t timestamp = 0x87654321;
+  auto add_packet = [&nack, &seq_num, &timestamp](bool received) {
+    if (received) {
+      nack.UpdateLastReceivedPacket(seq_num, timestamp);
+    }
+    seq_num++;
+    timestamp += kTimestampIncrement;
+  };
+  add_packet(true);
+  add_packet(false);
+  add_packet(true);
+  EXPECT_TRUE(nack.GetNackList(0).empty());
+  EXPECT_FALSE(nack.GetNackList(10).empty());
+}
+
}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq_impl.cc b/modules/audio_coding/neteq/neteq_impl.cc
index 8b07d7e47c..30886c3ace 100644
--- a/modules/audio_coding/neteq/neteq_impl.cc
+++ b/modules/audio_coding/neteq/neteq_impl.cc
@@ -10,13 +10,12 @@
#include "modules/audio_coding/neteq/neteq_impl.h"
-#include
-
#include
#include
#include
#include
#include
+#include
#include
#include
@@ -51,7 +50,6 @@
#include "rtc_base/strings/audio_format_to_string.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/clock.h"
-#include "system_wrappers/include/field_trial.h"
namespace webrtc {
namespace {
@@ -60,38 +58,18 @@
std::unique_ptr<NetEqController> CreateNetEqController(
    const NetEqControllerFactory& controller_factory,
    int base_min_delay,
    int max_packets_in_buffer,
-    bool enable_rtx_handling,
    bool allow_time_stretching,
    TickTimer* tick_timer,
    webrtc::Clock* clock) {
  NetEqController::Config config;
  config.base_min_delay_ms = base_min_delay;
  config.max_packets_in_buffer = max_packets_in_buffer;
-  config.enable_rtx_handling = enable_rtx_handling;
  config.allow_time_stretching = allow_time_stretching;
  config.tick_timer = tick_timer;
  config.clock = clock;
  return controller_factory.CreateNetEqController(config);
}
-int GetDelayChainLengthMs(int config_extra_delay_ms) {
  constexpr char kExtraDelayFieldTrial[] = "WebRTC-Audio-NetEqExtraDelay";
  if (webrtc::field_trial::IsEnabled(kExtraDelayFieldTrial)) {
    const auto
field_trial_string = - webrtc::field_trial::FindFullName(kExtraDelayFieldTrial); - int extra_delay_ms = -1; - if (sscanf(field_trial_string.c_str(), "Enabled-%d", &extra_delay_ms) == - 1 && - extra_delay_ms >= 0 && extra_delay_ms <= 2000) { - RTC_LOG(LS_INFO) << "Delay chain length set to " << extra_delay_ms - << " ms in field trial"; - return (extra_delay_ms / 10) * 10; // Rounding down to multiple of 10. - } - } - // Field trial not set, or invalid value read. Use value from config. - return config_extra_delay_ms; -} - } // namespace NetEqImpl::Dependencies::Dependencies( @@ -112,7 +90,6 @@ NetEqImpl::Dependencies::Dependencies( CreateNetEqController(controller_factory, config.min_delay_ms, config.max_packets_in_buffer, - config.enable_rtx_handling, !config.for_test_no_time_stretching, tick_timer.get(), clock)), @@ -158,11 +135,7 @@ NetEqImpl::NetEqImpl(const NetEq::Config& config, speech_expand_uma_logger_("WebRTC.Audio.SpeechExpandRatePercent", 10, // Report once every 10 s. tick_timer_.get()), - no_time_stretching_(config.for_test_no_time_stretching), - enable_rtx_handling_(config.enable_rtx_handling), - output_delay_chain_ms_( - GetDelayChainLengthMs(config.extra_output_delay_ms)), - output_delay_chain_(rtc::CheckedDivExact(output_delay_chain_ms_, 10)) { + no_time_stretching_(config.for_test_no_time_stretching) { RTC_LOG(LS_INFO) << "NetEq config: " << config.ToString(); int fs = config.sample_rate_hz; if (fs != 8000 && fs != 16000 && fs != 32000 && fs != 48000) { @@ -200,11 +173,12 @@ int NetEqImpl::InsertPacket(const RTPHeader& rtp_header, return kOK; } -void NetEqImpl::InsertEmptyPacket(const RTPHeader& /*rtp_header*/) { - // TODO(henrik.lundin) Handle NACK as well. This will make use of the - // rtp_header parameter. - // https://bugs.chromium.org/p/webrtc/issues/detail?id=7611 +void NetEqImpl::InsertEmptyPacket(const RTPHeader& rtp_header) { MutexLock lock(&mutex_); + if (nack_enabled_) { + nack_->UpdateLastReceivedPacket(rtp_header.sequenceNumber, + rtp_header.timestamp); + } controller_->RegisterEmptyPacket(); } @@ -247,7 +221,7 @@ void SetAudioFrameActivityAndType(bool vad_enabled, break; } default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } if (!vad_enabled) { // Always set kVadUnknown when receive VAD is inactive. 
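A note on the InsertEmptyPacket() change above: forwarding empty (DTX) packets to the NACK tracker keeps its sequence-number history current, and the 120 ms cap in GetSamplesPerPacket() then rejects the oversized packet a silence gap would imply, so the gap is not NACKed as a burst of losses. The DoNotNackAfterDtx test earlier exercises exactly this: a 400 ms jump across two sequence numbers produces no NACK entries.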
@@ -279,27 +253,8 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame, last_output_sample_rate_hz_ == 48000) << "Unexpected sample rate " << last_output_sample_rate_hz_; - if (!output_delay_chain_.empty()) { - if (output_delay_chain_empty_) { - for (auto& f : output_delay_chain_) { - f.CopyFrom(*audio_frame); - } - output_delay_chain_empty_ = false; - delayed_last_output_sample_rate_hz_ = last_output_sample_rate_hz_; - } else { - RTC_DCHECK_GE(output_delay_chain_ix_, 0); - RTC_DCHECK_LT(output_delay_chain_ix_, output_delay_chain_.size()); - swap(output_delay_chain_[output_delay_chain_ix_], *audio_frame); - *muted = audio_frame->muted(); - output_delay_chain_ix_ = - (output_delay_chain_ix_ + 1) % output_delay_chain_.size(); - delayed_last_output_sample_rate_hz_ = audio_frame->sample_rate_hz(); - } - } - if (current_sample_rate_hz) { - *current_sample_rate_hz = delayed_last_output_sample_rate_hz_.value_or( - last_output_sample_rate_hz_); + *current_sample_rate_hz = last_output_sample_rate_hz_; } return kOK; @@ -344,8 +299,7 @@ bool NetEqImpl::SetMinimumDelay(int delay_ms) { MutexLock lock(&mutex_); if (delay_ms >= 0 && delay_ms <= 10000) { RTC_DCHECK(controller_.get()); - return controller_->SetMinimumDelay( - std::max(delay_ms - output_delay_chain_ms_, 0)); + return controller_->SetMinimumDelay(delay_ms); } return false; } @@ -354,8 +308,7 @@ bool NetEqImpl::SetMaximumDelay(int delay_ms) { MutexLock lock(&mutex_); if (delay_ms >= 0 && delay_ms <= 10000) { RTC_DCHECK(controller_.get()); - return controller_->SetMaximumDelay( - std::max(delay_ms - output_delay_chain_ms_, 0)); + return controller_->SetMaximumDelay(delay_ms); } return false; } @@ -376,7 +329,7 @@ int NetEqImpl::GetBaseMinimumDelayMs() const { int NetEqImpl::TargetDelayMs() const { MutexLock lock(&mutex_); RTC_DCHECK(controller_.get()); - return controller_->TargetLevelMs() + output_delay_chain_ms_; + return controller_->TargetLevelMs(); } int NetEqImpl::FilteredCurrentDelayMs() const { @@ -386,8 +339,7 @@ int NetEqImpl::FilteredCurrentDelayMs() const { const int delay_samples = controller_->GetFilteredBufferLevel() + sync_buffer_->FutureLength(); // The division below will truncate. The return value is in ms. - return delay_samples / rtc::CheckedDivExact(fs_hz_, 1000) + - output_delay_chain_ms_; + return delay_samples / rtc::CheckedDivExact(fs_hz_, 1000); } int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) { @@ -395,11 +347,6 @@ int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) { RTC_DCHECK(decoder_database_.get()); *stats = CurrentNetworkStatisticsInternal(); stats_->GetNetworkStatistics(decoder_frame_length_, stats); - // Compensate for output delay chain. - stats->mean_waiting_time_ms += output_delay_chain_ms_; - stats->median_waiting_time_ms += output_delay_chain_ms_; - stats->min_waiting_time_ms += output_delay_chain_ms_; - stats->max_waiting_time_ms += output_delay_chain_ms_; return 0; } @@ -421,10 +368,6 @@ NetEqNetworkStatistics NetEqImpl::CurrentNetworkStatisticsInternal() const { RTC_DCHECK_GT(fs_hz_, 0); stats.current_buffer_size_ms = static_cast(total_samples_in_buffers * 1000 / fs_hz_); - - // Compensate for output delay chain. - stats.current_buffer_size_ms += output_delay_chain_ms_; - stats.preferred_buffer_size_ms += output_delay_chain_ms_; return stats; } @@ -468,19 +411,12 @@ absl::optional NetEqImpl::GetPlayoutTimestamp() const { // which is indicated by returning an empty value. 
return absl::nullopt; } - size_t sum_samples_in_output_delay_chain = 0; - for (const auto& audio_frame : output_delay_chain_) { - sum_samples_in_output_delay_chain += audio_frame.samples_per_channel(); - } - return timestamp_scaler_->ToExternal( - playout_timestamp_ - - static_cast(sum_samples_in_output_delay_chain)); + return timestamp_scaler_->ToExternal(playout_timestamp_); } int NetEqImpl::last_output_sample_rate_hz() const { MutexLock lock(&mutex_); - return delayed_last_output_sample_rate_hz_.value_or( - last_output_sample_rate_hz_); + return last_output_sample_rate_hz_; } absl::optional NetEqImpl::GetDecoderFormat( @@ -518,8 +454,7 @@ void NetEqImpl::FlushBuffers() { void NetEqImpl::EnableNack(size_t max_nack_list_size) { MutexLock lock(&mutex_); if (!nack_enabled_) { - const int kNackThresholdPackets = 2; - nack_.reset(NackTracker::Create(kNackThresholdPackets)); + nack_ = std::make_unique(); nack_enabled_ = true; nack_->UpdateSampleRate(fs_hz_); } @@ -609,7 +544,7 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header, // Reinitialize NetEq if it's needed (changed SSRC or first call). if (update_sample_rate_and_channels) { - // Note: |first_packet_| will be cleared further down in this method, once + // Note: `first_packet_` will be cleared further down in this method, once // the packet has been successfully inserted into the packet buffer. // Flush the packet buffer and DTMF buffer. @@ -785,8 +720,8 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header, } if (update_sample_rate_and_channels && !packet_buffer_->Empty()) { - // We do not use |current_rtp_payload_type_| to |set payload_type|, but - // get the next RTP header from |packet_buffer_| to obtain the payload type. + // We do not use `current_rtp_payload_type_` to |set payload_type|, but + // get the next RTP header from `packet_buffer_` to obtain the payload type. // The reason for it is the following corner case. If NetEq receives a // CNG packet with a sample rate different than the current CNG then it // flushes its buffer, assuming send codec must have been changed. However, @@ -826,14 +761,8 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header, info.main_sequence_number = main_sequence_number; info.is_dtx = is_dtx; info.buffer_flush = buffer_flush_occured; - // Only update statistics if incoming packet is not older than last played - // out packet or RTX handling is enabled, and if new codec flag is not - // set. - const bool should_update_stats = - (enable_rtx_handling_ || - static_cast(main_timestamp - timestamp_) >= 0) && - !new_codec_; + const bool should_update_stats = !new_codec_; auto relative_delay = controller_->PacketArrived(fs_hz_, should_update_stats, info); if (relative_delay) { @@ -869,6 +798,11 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, RTC_DCHECK(audio_frame->muted()); // Reset() should mute the frame. playout_timestamp_ += static_cast(output_size_samples_); audio_frame->sample_rate_hz_ = fs_hz_; + // Make sure the total number of samples fits in the AudioFrame. + if (output_size_samples_ * sync_buffer_->Channels() > + AudioFrame::kMaxDataSizeSamples) { + return kSampleUnderrun; + } audio_frame->samples_per_channel_ = output_size_samples_; audio_frame->timestamp_ = first_packet_ @@ -965,7 +899,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, } case Operation::kUndefined: { RTC_LOG(LS_ERROR) << "Invalid operation kUndefined."; - RTC_NOTREACHED(); // This should not happen. + RTC_DCHECK_NOTREACHED(); // This should not happen. 
last_mode_ = Mode::kError; return kInvalidOperation; } @@ -979,18 +913,18 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, comfort_noise_->Reset(); } - // We treat it as if all packets referenced to by |last_decoded_packet_infos_| - // were mashed together when creating the samples in |algorithm_buffer_|. + // We treat it as if all packets referenced to by `last_decoded_packet_infos_` + // were mashed together when creating the samples in `algorithm_buffer_`. RtpPacketInfos packet_infos(last_decoded_packet_infos_); - // Copy samples from |algorithm_buffer_| to |sync_buffer_|. + // Copy samples from `algorithm_buffer_` to `sync_buffer_`. // // TODO(bugs.webrtc.org/10757): - // We would in the future also like to pass |packet_infos| so that we can do - // sample-perfect tracking of that information across |sync_buffer_|. + // We would in the future also like to pass `packet_infos` so that we can do + // sample-perfect tracking of that information across `sync_buffer_`. sync_buffer_->PushBack(*algorithm_buffer_); - // Extract data from |sync_buffer_| to |output|. + // Extract data from `sync_buffer_` to `output`. size_t num_output_samples_per_channel = output_size_samples_; size_t num_output_samples = output_size_samples_ * sync_buffer_->Channels(); if (num_output_samples > AudioFrame::kMaxDataSizeSamples) { @@ -1007,14 +941,14 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, audio_frame->sample_rate_hz_ = fs_hz_; // TODO(bugs.webrtc.org/10757): // We don't have the ability to properly track individual packets once their - // audio samples have entered |sync_buffer_|. So for now, treat it as if - // |packet_infos| from packets decoded by the current |GetAudioInternal()| + // audio samples have entered `sync_buffer_`. So for now, treat it as if + // `packet_infos` from packets decoded by the current `GetAudioInternal()` // call were all consumed assembling the current audio frame and the current // audio frame only. audio_frame->packet_infos_ = std::move(packet_infos); if (sync_buffer_->FutureLength() < expand_->overlap_length()) { - // The sync buffer should always contain |overlap_length| samples, but now - // too many samples have been extracted. Reinstall the |overlap_length| + // The sync buffer should always contain `overlap_length` samples, but now + // too many samples have been extracted. Reinstall the `overlap_length` // lookahead by moving the index. const size_t missing_lookahead_samples = expand_->overlap_length() - sync_buffer_->FutureLength(); @@ -1032,7 +966,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, return kSampleUnderrun; } - // Should always have overlap samples left in the |sync_buffer_|. + // Should always have overlap samples left in the `sync_buffer_`. RTC_DCHECK_GE(sync_buffer_->FutureLength(), expand_->overlap_length()); // TODO(yujo): For muted frames, this can be a copy rather than an addition. @@ -1042,7 +976,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, } // Update the background noise parameters if last operation wrote data - // straight from the decoder to the |sync_buffer_|. That is, none of the + // straight from the decoder to the `sync_buffer_`. That is, none of the // operations that modify the signal can be followed by a parameter update. 
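// [Editor's aside.] The lookahead repair above is plain bookkeeping: the
// sync buffer must always keep overlap_length() samples of "future" audio
// for the next overlap-add, so after over-extraction the read position is
// moved back by the shortfall. The quantity involved:

#include <cstddef>

size_t MissingLookaheadSamples(size_t overlap_length, size_t future_length) {
  // Shortfall of future samples; zero when the invariant already holds.
  return overlap_length > future_length ? overlap_length - future_length : 0;
}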
 if ((last_mode_ == Mode::kNormal) || (last_mode_ == Mode::kAccelerateFail) ||
     (last_mode_ == Mode::kPreemptiveExpandFail) ||
@@ -1052,14 +986,14 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
   }
 
   if (operation == Operation::kDtmf) {
-    // DTMF data was written the end of |sync_buffer_|.
-    // Update index to end of DTMF data in |sync_buffer_|.
+    // DTMF data was written to the end of `sync_buffer_`.
+    // Update index to end of DTMF data in `sync_buffer_`.
     sync_buffer_->set_dtmf_index(sync_buffer_->Size());
   }
 
   if (last_mode_ != Mode::kExpand && last_mode_ != Mode::kCodecPlc) {
-    // If last operation was not expand, calculate the |playout_timestamp_| from
-    // the |sync_buffer_|. However, do not update the |playout_timestamp_| if it
+    // If last operation was not expand, calculate the `playout_timestamp_` from
+    // the `sync_buffer_`. However, do not update the `playout_timestamp_` if it
     // would be moved "backwards".
     uint32_t temp_timestamp =
         sync_buffer_->end_timestamp() -
@@ -1068,7 +1002,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame,
       playout_timestamp_ = temp_timestamp;
     }
   } else {
-    // Use dead reckoning to estimate the |playout_timestamp_|.
+    // Use dead reckoning to estimate the `playout_timestamp_`.
    playout_timestamp_ += static_cast<uint32_t>(output_size_samples_);
   }
   // Set the timestamp in the audio frame to zero before the first packet has
@@ -1128,7 +1062,7 @@ int NetEqImpl::GetDecision(Operation* operation,
       // Don't use this packet, discard it.
       if (packet_buffer_->DiscardNextPacket(stats_.get()) !=
           PacketBuffer::kOK) {
-        RTC_NOTREACHED();  // Must be ok by design.
+        RTC_DCHECK_NOTREACHED();  // Must be ok by design.
       }
       // Check buffer again.
       if (!new_codec_) {
@@ -1207,7 +1141,7 @@ int NetEqImpl::GetDecision(Operation* operation,
     // Use the provided action instead of the decision NetEq decided on.
     *operation = *action_override;
   }
-  // Check if we already have enough samples in the |sync_buffer_|. If so,
+  // Check if we already have enough samples in the `sync_buffer_`. If so,
   // change decision to normal, unless the decision was merge, accelerate, or
   // preemptive expand.
   if (samples_left >= rtc::dchecked_cast<int>(output_size_samples_) &&
@@ -1246,7 +1180,7 @@ int NetEqImpl::GetDecision(Operation* operation,
       *operation = Operation::kNormal;
     }
   }
-  // Adjust |sync_buffer_| timestamp before setting |end_timestamp| to the
+  // Adjust `sync_buffer_` timestamp before setting `end_timestamp` to the
   // new value.
   sync_buffer_->IncreaseEndTimestamp(timestamp_ - end_timestamp);
   end_timestamp = timestamp_;
@@ -1381,7 +1315,7 @@ int NetEqImpl::GetDecision(Operation* operation,
     }
   }
 
-  timestamp_ = end_timestamp;
+  timestamp_ = sync_buffer_->end_timestamp();
   return 0;
 }
@@ -1536,7 +1470,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list,
   while (!packet_list->empty() &&
          !decoder_database_->IsComfortNoise(packet_list->front().payload_type)) {
     RTC_DCHECK(decoder);  // At this point, we must have a decoder object.
-    // The number of channels in the |sync_buffer_| should be the same as the
+    // The number of channels in the `sync_buffer_` should be the same as the
    // number decoder channels.
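// [Editor's aside.] "Dead reckoning" above means plain extrapolation: when
// nothing could be decoded, the playout timestamp advances by exactly one
// 10 ms output block per GetAudio() call, wrapping modulo 2^32:

#include <cstdint>

uint32_t ExtrapolatePlayoutTimestamp(uint32_t playout_timestamp, int fs_hz) {
  const uint32_t output_size_samples =
      static_cast<uint32_t>(fs_hz / 100);          // 10 ms: 480 at 48 kHz.
  return playout_timestamp + output_size_samples;  // Unsigned wrap is fine.
}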
RTC_DCHECK_EQ(sync_buffer_->Channels(), decoder->Channels()); RTC_DCHECK_GE(decoded_buffer_length_, kMaxFrameSize * decoder->Channels()); @@ -1558,7 +1492,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, *speech_type = result.speech_type; if (result.num_decoded_samples > 0) { *decoded_length += rtc::dchecked_cast(result.num_decoded_samples); - // Update |decoder_frame_length_| with number of samples per channel. + // Update `decoder_frame_length_` with number of samples per channel. decoder_frame_length_ = result.num_decoded_samples / decoder->Channels(); } @@ -1734,7 +1668,7 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, size_t num_channels = algorithm_buffer_->Channels(); size_t decoded_length_per_channel = decoded_length / num_channels; if (decoded_length_per_channel < required_samples) { - // Must move data from the |sync_buffer_| in order to get 30 ms. + // Must move data from the `sync_buffer_` in order to get 30 ms. borrowed_samples_per_channel = static_cast(required_samples - decoded_length_per_channel); memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels], @@ -1766,7 +1700,7 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, } if (borrowed_samples_per_channel > 0) { - // Copy borrowed samples back to the |sync_buffer_|. + // Copy borrowed samples back to the `sync_buffer_`. size_t length = algorithm_buffer_->Size(); if (length < borrowed_samples_per_channel) { // This destroys the beginning of the buffer, but will not cause any @@ -1807,7 +1741,7 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer, size_t old_borrowed_samples_per_channel = 0; size_t decoded_length_per_channel = decoded_length / num_channels; if (decoded_length_per_channel < required_samples) { - // Must move data from the |sync_buffer_| in order to get 30 ms. + // Must move data from the `sync_buffer_` in order to get 30 ms. borrowed_samples_per_channel = required_samples - decoded_length_per_channel; // Calculate how many of these were already played out. @@ -1844,7 +1778,7 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer, } if (borrowed_samples_per_channel > 0) { - // Copy borrowed samples back to the |sync_buffer_|. + // Copy borrowed samples back to the `sync_buffer_`. sync_buffer_->ReplaceAtIndex( *algorithm_buffer_, borrowed_samples_per_channel, sync_buffer_->Size() - borrowed_samples_per_channel); @@ -1904,10 +1838,10 @@ void NetEqImpl::DoCodecInternalCng(const int16_t* decoded_buffer, } int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) { - // This block of the code and the block further down, handling |dtmf_switch| + // This block of the code and the block further down, handling `dtmf_switch` // are commented out. Otherwise playing out-of-band DTMF would fail in VoE // test, DtmfTest.ManualSuccessfullySendsOutOfBandTelephoneEvents. This is - // equivalent to |dtmf_switch| always be false. + // equivalent to `dtmf_switch` always be false. // // See http://webrtc-codereview.appspot.com/1195004/ for discussion // On this issue. This change might cause some glitches at the point of @@ -1917,7 +1851,7 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) { // if ((last_mode_ != Modes::kDtmf) && // dtmf_tone_generator_->initialized()) { // // Special case; see below. - // // We must catch this before calling Generate, since |initialized| is + // // We must catch this before calling Generate, since `initialized` is // // modified in that call. 
// dtmf_switch = true; // } @@ -1948,8 +1882,8 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) { // // it must be copied to the speech buffer. // // TODO(hlundin): This code seems incorrect. (Legacy.) Write test and // // verify correct operation. - // RTC_NOTREACHED(); - // // Must generate enough data to replace all of the |sync_buffer_| + // RTC_DCHECK_NOTREACHED(); + // // Must generate enough data to replace all of the `sync_buffer_` // // "future". // int required_length = sync_buffer_->FutureLength(); // RTC_DCHECK(dtmf_tone_generator_->initialized()); @@ -2034,11 +1968,12 @@ int NetEqImpl::ExtractPackets(size_t required_samples, do { timestamp_ = next_packet->timestamp; absl::optional packet = packet_buffer_->GetNextPacket(); - // |next_packet| may be invalid after the |packet_buffer_| operation. + // `next_packet` may be invalid after the `packet_buffer_` operation. next_packet = nullptr; if (!packet) { RTC_LOG(LS_ERROR) << "Should always be able to extract a packet here"; - RTC_NOTREACHED(); // Should always be able to extract a packet here. + RTC_DCHECK_NOTREACHED(); // Should always be able to extract a packet + // here. return -1; } const uint64_t waiting_time_ms = packet->waiting_time->ElapsedMs(); @@ -2072,7 +2007,7 @@ int NetEqImpl::ExtractPackets(size_t required_samples, } else if (!has_cng_packet) { RTC_LOG(LS_WARNING) << "Unknown payload type " << static_cast(packet->payload_type); - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } if (packet_duration == 0) { @@ -2083,9 +2018,8 @@ int NetEqImpl::ExtractPackets(size_t required_samples, extracted_samples = packet->timestamp - first_timestamp + packet_duration; RTC_DCHECK(controller_); - stats_->JitterBufferDelay( - packet_duration, waiting_time_ms + output_delay_chain_ms_, - controller_->TargetLevelMs() + output_delay_chain_ms_); + stats_->JitterBufferDelay(packet_duration, waiting_time_ms, + controller_->TargetLevelMs()); packet_list->push_back(std::move(*packet)); // Store packet in list. packet = absl::nullopt; // Ensure it's never used after the move. @@ -2181,7 +2115,7 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) { comfort_noise_.reset( new ComfortNoise(fs_hz, decoder_database_.get(), sync_buffer_.get())); - // Verify that |decoded_buffer_| is long enough. + // Verify that `decoded_buffer_` is long enough. if (decoded_buffer_length_ < kMaxFrameSize * channels) { // Reallocate to larger size. decoded_buffer_length_ = kMaxFrameSize * channels; diff --git a/modules/audio_coding/neteq/neteq_impl.h b/modules/audio_coding/neteq/neteq_impl.h index 88da6dcbd5..e2cd6c6054 100644 --- a/modules/audio_coding/neteq/neteq_impl.h +++ b/modules/audio_coding/neteq/neteq_impl.h @@ -29,7 +29,6 @@ #include "modules/audio_coding/neteq/packet.h" #include "modules/audio_coding/neteq/random_vector.h" #include "modules/audio_coding/neteq/statistics_calculator.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" @@ -124,6 +123,9 @@ class NetEqImpl : public webrtc::NetEq { ~NetEqImpl() override; + NetEqImpl(const NetEqImpl&) = delete; + NetEqImpl& operator=(const NetEqImpl&) = delete; + // Inserts a new packet into NetEq. Returns 0 on success, -1 on failure. 
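// [Editor's aside.] The `= delete` pair added above is the C++11 idiom that
// the removed RTC_DISALLOW_COPY_AND_ASSIGN(NetEqImpl) macro (from the now
// unused rtc_base/constructor_magic.h) used to generate. Spelled out on a
// standalone class:

class NonCopyableSketch {
 public:
  NonCopyableSketch() = default;
  // Copying is a bug for classes owning buffers, locks, and decoder state,
  // so both copy operations are deleted rather than left implicit.
  NonCopyableSketch(const NonCopyableSketch&) = delete;
  NonCopyableSketch& operator=(const NonCopyableSketch&) = delete;
};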
int InsertPacket(const RTPHeader& rtp_header, rtc::ArrayView payload) override; @@ -141,7 +143,7 @@ class NetEqImpl : public webrtc::NetEq { bool RegisterPayloadType(int rtp_payload_type, const SdpAudioFormat& audio_format) override; - // Removes |rtp_payload_type| from the codec database. Returns 0 on success, + // Removes `rtp_payload_type` from the codec database. Returns 0 on success, // -1 on failure. int RemovePayloadType(uint8_t rtp_payload_type) override; @@ -159,7 +161,7 @@ class NetEqImpl : public webrtc::NetEq { int FilteredCurrentDelayMs() const override; - // Writes the current network statistics to |stats|. The statistics are reset + // Writes the current network statistics to `stats`. The statistics are reset // after the call. int NetworkStatistics(NetEqNetworkStatistics* stats) override; @@ -215,7 +217,7 @@ class NetEqImpl : public webrtc::NetEq { rtc::ArrayView payload) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - // Delivers 10 ms of audio data. The data is written to |audio_frame|. + // Delivers 10 ms of audio data. The data is written to `audio_frame`. // Returns 0 on success, otherwise an error code. int GetAudioInternal(AudioFrame* audio_frame, bool* muted, @@ -223,9 +225,9 @@ class NetEqImpl : public webrtc::NetEq { RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Provides a decision to the GetAudioInternal method. The decision what to - // do is written to |operation|. Packets to decode are written to - // |packet_list|, and a DTMF event to play is written to |dtmf_event|. When - // DTMF should be played, |play_dtmf| is set to true by the method. + // do is written to `operation`. Packets to decode are written to + // `packet_list`, and a DTMF event to play is written to `dtmf_event`. When + // DTMF should be played, `play_dtmf` is set to true by the method. // Returns 0 on success, otherwise an error code. int GetDecision(Operation* operation, PacketList* packet_list, @@ -234,11 +236,11 @@ class NetEqImpl : public webrtc::NetEq { absl::optional action_override) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - // Decodes the speech packets in |packet_list|, and writes the results to - // |decoded_buffer|, which is allocated to hold |decoded_buffer_length| - // elements. The length of the decoded data is written to |decoded_length|. + // Decodes the speech packets in `packet_list`, and writes the results to + // `decoded_buffer`, which is allocated to hold `decoded_buffer_length` + // elements. The length of the decoded data is written to `decoded_length`. // The speech type -- speech or (codec-internal) comfort noise -- is written - // to |speech_type|. If |packet_list| contains any SID frames for RFC 3389 + // to `speech_type`. If `packet_list` contains any SID frames for RFC 3389 // comfort noise, those are not decoded. int Decode(PacketList* packet_list, Operation* operation, @@ -293,7 +295,7 @@ class NetEqImpl : public webrtc::NetEq { bool play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Sub-method which calls the ComfortNoise class to generate RFC 3389 comfort - // noise. |packet_list| can either contain one SID frame to update the + // noise. `packet_list` can either contain one SID frame to update the // noise parameters, or no payload at all, in which case the previously // received parameters are used. int DoRfc3389Cng(PacketList* packet_list, bool play_dtmf) @@ -308,20 +310,20 @@ class NetEqImpl : public webrtc::NetEq { int DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - // Overdub DTMF on top of |output|. 
+ // Overdub DTMF on top of `output`. int DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels, int16_t* output) const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - // Extracts packets from |packet_buffer_| to produce at least - // |required_samples| samples. The packets are inserted into |packet_list|. + // Extracts packets from `packet_buffer_` to produce at least + // `required_samples` samples. The packets are inserted into `packet_list`. // Returns the number of samples that the packets in the list will produce, or // -1 in case of an error. int ExtractPackets(size_t required_samples, PacketList* packet_list) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Resets various variables and objects to new values based on the sample rate - // |fs_hz| and |channels| number audio channels. + // `fs_hz` and `channels` number audio channels. void SetSampleRateAndChannels(int fs_hz, size_t channels) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); @@ -399,26 +401,6 @@ class NetEqImpl : public webrtc::NetEq { ExpandUmaLogger speech_expand_uma_logger_ RTC_GUARDED_BY(mutex_); bool no_time_stretching_ RTC_GUARDED_BY(mutex_); // Only used for test. rtc::BufferT concealment_audio_ RTC_GUARDED_BY(mutex_); - const bool enable_rtx_handling_ RTC_GUARDED_BY(mutex_); - // Data members used for adding extra delay to the output of NetEq. - // The delay in ms (which is 10 times the number of elements in - // output_delay_chain_). - const int output_delay_chain_ms_ RTC_GUARDED_BY(mutex_); - // Vector of AudioFrames which contains the delayed audio. Accessed as a - // circular buffer. - std::vector output_delay_chain_ RTC_GUARDED_BY(mutex_); - // Index into output_delay_chain_. - size_t output_delay_chain_ix_ RTC_GUARDED_BY(mutex_) = 0; - // Did output_delay_chain_ get populated yet? - bool output_delay_chain_empty_ RTC_GUARDED_BY(mutex_) = true; - // Contains the sample rate of the AudioFrame last emitted from the delay - // chain. If the extra output delay chain is not used, or if no audio has been - // emitted yet, the variable is empty. 
- absl::optional delayed_last_output_sample_rate_hz_ - RTC_GUARDED_BY(mutex_); - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(NetEqImpl); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/neteq_impl_unittest.cc b/modules/audio_coding/neteq/neteq_impl_unittest.cc index 53b4dae17d..b39a880292 100644 --- a/modules/audio_coding/neteq/neteq_impl_unittest.cc +++ b/modules/audio_coding/neteq/neteq_impl_unittest.cc @@ -18,6 +18,7 @@ #include "api/neteq/default_neteq_controller_factory.h" #include "api/neteq/neteq.h" #include "api/neteq/neteq_controller.h" +#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h" #include "modules/audio_coding/neteq/accelerate.h" #include "modules/audio_coding/neteq/decision_logic.h" #include "modules/audio_coding/neteq/default_neteq_factory.h" @@ -75,6 +76,7 @@ class NetEqImplTest : public ::testing::Test { void CreateInstance( const rtc::scoped_refptr& decoder_factory) { ASSERT_TRUE(decoder_factory); + config_.enable_muted_state = enable_muted_state_; NetEqImpl::Dependencies deps(config_, &clock_, decoder_factory, DefaultNetEqControllerFactory()); @@ -122,7 +124,6 @@ class NetEqImplTest : public ::testing::Test { NetEqController::Config controller_config; controller_config.tick_timer = tick_timer_; controller_config.base_min_delay_ms = config_.min_delay_ms; - controller_config.enable_rtx_handling = config_.enable_rtx_handling; controller_config.allow_time_stretching = true; controller_config.max_packets_in_buffer = config_.max_packets_in_buffer; controller_config.clock = &clock_; @@ -207,8 +208,8 @@ class NetEqImplTest : public ::testing::Test { EXPECT_EQ(1u, output.num_channels_); EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_); - // DTMF packets are immediately consumed by |InsertPacket()| and won't be - // returned by |GetAudio()|. + // DTMF packets are immediately consumed by `InsertPacket()` and won't be + // returned by `GetAudio()`. EXPECT_THAT(output.packet_infos_, IsEmpty()); // Verify first 64 samples of actual output. @@ -246,6 +247,7 @@ class NetEqImplTest : public ::testing::Test { MockRedPayloadSplitter* mock_payload_splitter_ = nullptr; RedPayloadSplitter* red_payload_splitter_ = nullptr; bool use_mock_payload_splitter_ = true; + bool enable_muted_state_ = false; }; // This tests the interface class NetEq. @@ -461,7 +463,7 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) { public: CountingSamplesDecoder() : next_value_(1) {} - // Produce as many samples as input bytes (|encoded_len|). + // Produce as many samples as input bytes (`encoded_len`). int DecodeInternal(const uint8_t* encoded, size_t encoded_len, int /* sample_rate_hz */, @@ -510,7 +512,7 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) { EXPECT_EQ(1u, output.num_channels_); EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_); - // Verify |output.packet_infos_|. + // Verify `output.packet_infos_`. ASSERT_THAT(output.packet_infos_, SizeIs(1)); { const auto& packet_info = output.packet_infos_[0]; @@ -550,6 +552,7 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) { TEST_F(NetEqImplTest, ReorderedPacket) { UseNoMocks(); + // Create a mock decoder object. MockAudioDecoder mock_decoder; @@ -578,7 +581,7 @@ TEST_F(NetEqImplTest, ReorderedPacket) { .WillRepeatedly(Return(rtc::checked_cast(kPayloadLengthSamples))); int16_t dummy_output[kPayloadLengthSamples] = {0}; // The below expectation will make the mock decoder write - // |kPayloadLengthSamples| zeros to the output array, and mark it as speech. 
+ // `kPayloadLengthSamples` zeros to the output array, and mark it as speech. EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(0), kPayloadLengthBytes, kSampleRateHz, _, _)) .WillOnce(DoAll(SetArrayArgument<3>(dummy_output, @@ -602,7 +605,7 @@ TEST_F(NetEqImplTest, ReorderedPacket) { EXPECT_EQ(1u, output.num_channels_); EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_); - // Verify |output.packet_infos_|. + // Verify `output.packet_infos_`. ASSERT_THAT(output.packet_infos_, SizeIs(1)); { const auto& packet_info = output.packet_infos_[0]; @@ -648,7 +651,10 @@ TEST_F(NetEqImplTest, ReorderedPacket) { // out-of-order packet should have been discarded. EXPECT_TRUE(packet_buffer_->Empty()); - // Verify |output.packet_infos_|. Expect to only see the second packet. + // NetEq `discarded_primary_packets` should capture this packet discard. + EXPECT_EQ(1u, neteq_->GetOperationsAndState().discarded_primary_packets); + + // Verify `output.packet_infos_`. Expect to only see the second packet. ASSERT_THAT(output.packet_infos_, SizeIs(1)); { const auto& packet_info = output.packet_infos_[0]; @@ -1023,22 +1029,37 @@ TEST_F(NetEqImplTest, CodecInternalCng) { EXPECT_CALL(mock_decoder, PacketDuration(nullptr, 0)) .WillRepeatedly(Return(rtc::checked_cast(kPayloadLengthSamples))); - // Pointee(x) verifies that first byte of the payload equals x, this makes it - // possible to verify that the correct payload is fed to Decode(). - EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(0), kPayloadLengthBytes, - kSampleRateKhz * 1000, _, _)) - .WillOnce(DoAll(SetArrayArgument<3>(dummy_output, - dummy_output + kPayloadLengthSamples), - SetArgPointee<4>(AudioDecoder::kSpeech), - Return(rtc::checked_cast(kPayloadLengthSamples)))); + EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType, + SdpAudioFormat("opus", 48000, 2))); - EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(1), kPayloadLengthBytes, - kSampleRateKhz * 1000, _, _)) - .WillOnce(DoAll(SetArrayArgument<3>(dummy_output, - dummy_output + kPayloadLengthSamples), - SetArgPointee<4>(AudioDecoder::kComfortNoise), - Return(rtc::checked_cast(kPayloadLengthSamples)))); + struct Packet { + int sequence_number_delta; + int timestamp_delta; + AudioDecoder::SpeechType decoder_output_type; + }; + std::vector packets = { + {0, 0, AudioDecoder::kSpeech}, + {1, kPayloadLengthSamples, AudioDecoder::kComfortNoise}, + {2, 2 * kPayloadLengthSamples, AudioDecoder::kSpeech}, + {1, kPayloadLengthSamples, AudioDecoder::kSpeech}}; + + for (size_t i = 0; i < packets.size(); ++i) { + rtp_header.sequenceNumber += packets[i].sequence_number_delta; + rtp_header.timestamp += packets[i].timestamp_delta; + payload[0] = i; + EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload)); + + // Pointee(x) verifies that first byte of the payload equals x, this makes + // it possible to verify that the correct payload is fed to Decode(). + EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(i), kPayloadLengthBytes, + kSampleRateKhz * 1000, _, _)) + .WillOnce(DoAll(SetArrayArgument<3>( + dummy_output, dummy_output + kPayloadLengthSamples), + SetArgPointee<4>(packets[i].decoder_output_type), + Return(rtc::checked_cast(kPayloadLengthSamples)))); + } + // Expect comfort noise to be returned by the decoder. 
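// [Editor's aside.] The data-driven loop above hinges on ::testing::Pointee:
// payload[0] is set to the packet index before insertion, so Pointee(i)
// binds each DecodeInternal expectation to exactly one inserted packet.
// Minimal standalone use of the matcher:

#include <cstdint>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

TEST(PointeeSketchTest, MatchesFirstPayloadByte) {
  const uint8_t payload[1] = {2};
  const uint8_t* payload_ptr = payload;
  // Pointee(m) dereferences the pointer and applies m to the first byte.
  EXPECT_THAT(payload_ptr, ::testing::Pointee(2));
}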
EXPECT_CALL(mock_decoder, DecodeInternal(IsNull(), 0, kSampleRateKhz * 1000, _, _)) .WillOnce(DoAll(SetArrayArgument<3>(dummy_output, @@ -1046,87 +1067,24 @@ TEST_F(NetEqImplTest, CodecInternalCng) { SetArgPointee<4>(AudioDecoder::kComfortNoise), Return(rtc::checked_cast(kPayloadLengthSamples)))); - EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(2), kPayloadLengthBytes, - kSampleRateKhz * 1000, _, _)) - .WillOnce(DoAll(SetArrayArgument<3>(dummy_output, - dummy_output + kPayloadLengthSamples), - SetArgPointee<4>(AudioDecoder::kSpeech), - Return(rtc::checked_cast(kPayloadLengthSamples)))); - - EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType, - SdpAudioFormat("opus", 48000, 2))); + std::vector expected_output = { + AudioFrame::kNormalSpeech, AudioFrame::kCNG, AudioFrame::kNormalSpeech}; + size_t output_index = 0; - const size_t kMaxOutputSize = static_cast(10 * kSampleRateKhz); - AudioFrame output; - AudioFrame::SpeechType expected_type[8] = { - AudioFrame::kNormalSpeech, AudioFrame::kNormalSpeech, AudioFrame::kCNG, - AudioFrame::kCNG, AudioFrame::kCNG, AudioFrame::kCNG, - AudioFrame::kNormalSpeech, AudioFrame::kNormalSpeech}; - int expected_timestamp_increment[8] = { - -1, // will not be used. - 10 * kSampleRateKhz, - -1, - -1, // timestamp will be empty during CNG mode; indicated by -1 here. - -1, - -1, - 50 * kSampleRateKhz, - 10 * kSampleRateKhz}; - - // Insert one packet (decoder will return speech). - EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload)); - - bool muted; - EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted)); - absl::optional last_timestamp = neteq_->GetPlayoutTimestamp(); - ASSERT_TRUE(last_timestamp); - - // Insert second packet (decoder will return CNG). - payload[0] = 1; - rtp_header.sequenceNumber++; - rtp_header.timestamp += kPayloadLengthSamples; - EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload)); - - // Lambda for verifying the timestamps. - auto verify_timestamp = [&last_timestamp, &expected_timestamp_increment]( - absl::optional ts, size_t i) { - if (expected_timestamp_increment[i] == -1) { - // Expect to get an empty timestamp value during CNG and PLC. - EXPECT_FALSE(ts) << "i = " << i; + int timeout_counter = 0; + while (!packet_buffer_->Empty()) { + ASSERT_LT(timeout_counter++, 20) << "Test timed out"; + AudioFrame output; + bool muted; + EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted)); + if (output_index + 1 < expected_output.size() && + output.speech_type_ == expected_output[output_index + 1]) { + ++output_index; } else { - ASSERT_TRUE(ts) << "i = " << i; - EXPECT_EQ(*ts, *last_timestamp + expected_timestamp_increment[i]) - << "i = " << i; - last_timestamp = ts; + EXPECT_EQ(output.speech_type_, expected_output[output_index]); } - }; - - for (size_t i = 1; i < 6; ++i) { - ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_); - EXPECT_EQ(1u, output.num_channels_); - EXPECT_EQ(expected_type[i - 1], output.speech_type_); - EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted)); - SCOPED_TRACE(""); - verify_timestamp(neteq_->GetPlayoutTimestamp(), i); - } - - // Insert third packet, which leaves a gap from last packet. 
- payload[0] = 2; - rtp_header.sequenceNumber += 2; - rtp_header.timestamp += 2 * kPayloadLengthSamples; - EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload)); - - for (size_t i = 6; i < 8; ++i) { - ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_); - EXPECT_EQ(1u, output.num_channels_); - EXPECT_EQ(expected_type[i - 1], output.speech_type_); - EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted)); - SCOPED_TRACE(""); - verify_timestamp(neteq_->GetPlayoutTimestamp(), i); } - // Now check the packet buffer, and make sure it is empty. - EXPECT_TRUE(packet_buffer_->Empty()); - EXPECT_CALL(mock_decoder, Die()); } @@ -1284,7 +1242,7 @@ TEST_F(NetEqImplTest, DecodedPayloadTooShort) { .WillRepeatedly(Return(rtc::checked_cast(kPayloadLengthSamples))); int16_t dummy_output[kPayloadLengthSamples] = {0}; // The below expectation will make the mock decoder write - // |kPayloadLengthSamples| - 5 zeros to the output array, and mark it as + // `kPayloadLengthSamples` - 5 zeros to the output array, and mark it as // speech. That is, the decoded length is 5 samples shorter than the expected. EXPECT_CALL(mock_decoder, DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _)) @@ -1599,12 +1557,11 @@ TEST_F(NetEqImplTest, InsertEmptyPacket) { neteq_->InsertEmptyPacket(rtp_header); } -TEST_F(NetEqImplTest, EnableRtxHandling) { +TEST_F(NetEqImplTest, NotifyControllerOfReorderedPacket) { using ::testing::AllOf; using ::testing::Field; UseNoMocks(); use_mock_neteq_controller_ = true; - config_.enable_rtx_handling = true; CreateInstance(); EXPECT_CALL(*mock_neteq_controller_, GetDecision(_, _)) .Times(1) @@ -1634,7 +1591,7 @@ TEST_F(NetEqImplTest, EnableRtxHandling) { *mock_neteq_controller_, PacketArrived( /*fs_hz*/ 8000, - /*should_update_stats*/ _, + /*should_update_stats*/ true, /*info*/ AllOf( Field(&NetEqController::PacketArrivedInfo::packet_length_samples, @@ -1647,6 +1604,69 @@ TEST_F(NetEqImplTest, EnableRtxHandling) { EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload)); } +// When using a codec with 1000 channels, there should be no crashes. +TEST_F(NetEqImplTest, NoCrashWith1000Channels) { + using ::testing::AllOf; + using ::testing::Field; + UseNoMocks(); + use_mock_decoder_database_ = true; + enable_muted_state_ = true; + CreateInstance(); + const size_t kPayloadLength = 100; + const uint8_t kPayloadType = 0; + const uint16_t kFirstSequenceNumber = 0x1234; + const uint32_t kFirstTimestamp = 0x12345678; + const uint32_t kSsrc = 0x87654321; + uint8_t payload[kPayloadLength] = {0}; + RTPHeader rtp_header; + rtp_header.payloadType = kPayloadType; + rtp_header.sequenceNumber = kFirstSequenceNumber; + rtp_header.timestamp = kFirstTimestamp; + rtp_header.ssrc = kSsrc; + Packet fake_packet; + fake_packet.payload_type = kPayloadType; + fake_packet.sequence_number = kFirstSequenceNumber; + fake_packet.timestamp = kFirstTimestamp; + + AudioDecoder* decoder = nullptr; + + auto mock_decoder_factory = rtc::make_ref_counted(); + EXPECT_CALL(*mock_decoder_factory, MakeAudioDecoderMock(_, _, _)) + .WillOnce(Invoke([&](const SdpAudioFormat& format, + absl::optional codec_pair_id, + std::unique_ptr* dec) { + EXPECT_EQ("pcmu", format.name); + *dec = std::make_unique(1000); + decoder = dec->get(); + })); + DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1), + absl::nullopt, mock_decoder_factory); + // Expectations for decoder database. 
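// [Editor's aside.] The arithmetic that makes this regression test bite:
// 10 ms at 8 kHz is 80 samples per channel, so 1000 channels need 80000
// interleaved samples, far beyond what an AudioFrame can hold
// (AudioFrame::kMaxDataSizeSamples). The new guard in GetAudioInternal()
// turns that into a kSampleUnderrun error instead of an out-of-bounds write:

#include <cstddef>

bool FitsInAudioFrame(size_t samples_per_channel, size_t channels,
                      size_t max_data_size_samples) {
  return samples_per_channel * channels <= max_data_size_samples;
}
// FitsInAudioFrame(80, 1000, ...) is false for any realistic frame capacity.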
+ EXPECT_CALL(*mock_decoder_database_, GetDecoderInfo(kPayloadType)) + .WillRepeatedly(Return(&info)); + EXPECT_CALL(*mock_decoder_database_, GetActiveCngDecoder()) + .WillRepeatedly(ReturnNull()); + EXPECT_CALL(*mock_decoder_database_, GetActiveDecoder()) + .WillRepeatedly(Return(decoder)); + EXPECT_CALL(*mock_decoder_database_, SetActiveDecoder(_, _)) + .WillOnce(Invoke([](uint8_t rtp_payload_type, bool* new_decoder) { + *new_decoder = true; + return 0; + })); + + // Insert first packet. + neteq_->InsertPacket(rtp_header, payload); + + AudioFrame audio_frame; + bool muted; + + // Repeat 40 times to ensure we enter muted state. + for (int i = 0; i < 40; i++) { + // GetAudio should return an error, and not crash, even in muted state. + EXPECT_NE(0, neteq_->GetAudio(&audio_frame, &muted)); + } +} + class Decoder120ms : public AudioDecoder { public: Decoder120ms(int sample_rate_hz, SpeechType speech_type) diff --git a/modules/audio_coding/neteq/neteq_network_stats_unittest.cc b/modules/audio_coding/neteq/neteq_network_stats_unittest.cc index 8f72734d23..2c68501a98 100644 --- a/modules/audio_coding/neteq/neteq_network_stats_unittest.cc +++ b/modules/audio_coding/neteq/neteq_network_stats_unittest.cc @@ -188,11 +188,11 @@ class NetEqNetworkStatsTest { : 0xffffffff); } - // |stats_ref| + // `stats_ref` // expects.x = -1, do not care - // expects.x = 0, 'x' in current stats should equal 'x' in |stats_ref| - // expects.x = 1, 'x' in current stats should < 'x' in |stats_ref| - // expects.x = 2, 'x' in current stats should > 'x' in |stats_ref| + // expects.x = 0, 'x' in current stats should equal 'x' in `stats_ref` + // expects.x = 1, 'x' in current stats should < 'x' in `stats_ref` + // expects.x = 2, 'x' in current stats should > 'x' in `stats_ref` void CheckNetworkStatistics(NetEqNetworkStatsCheck expects) { NetEqNetworkStatistics stats; neteq_->NetworkStatistics(&stats); @@ -229,7 +229,7 @@ class NetEqNetworkStatsTest { uint32_t time_now; uint32_t next_send_time; - // Initiate |last_lost_time_|. + // Initiate `last_lost_time_`. time_now = next_send_time = last_lost_time_ = rtp_generator_->GetRtpHeader( kPayloadType, frame_size_samples_, &rtp_header_); for (int k = 0; k < num_loops; ++k) { @@ -274,7 +274,7 @@ class NetEqNetworkStatsTest { // Next we introduce packet losses. SetPacketLossRate(0.1); - expects.stats_ref.expand_rate = expects.stats_ref.speech_expand_rate = 1065; + expects.stats_ref.expand_rate = expects.stats_ref.speech_expand_rate = 898; RunTest(50, expects); // Next we enable FEC. 
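The expand_rate and speech_expand_rate fields asserted above are NetEq
network statistics reported in Q14, i.e. fixed-point fractions of 16384, so
the updated reference value corresponds to roughly 5.5 % of the output being
synthesized. A quick standalone conversion (the constant is just the test's
new reference value):

#include <cstdio>

int main() {
  const int expand_rate_q14 = 898;  // Updated reference value from the test.
  std::printf("expand rate: %.2f%%\n",
              100.0 * expand_rate_q14 / (1 << 14));  // Prints 5.48%.
}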
diff --git a/modules/audio_coding/neteq/neteq_unittest.cc b/modules/audio_coding/neteq/neteq_unittest.cc index bdd90e96cc..4ff1a431e1 100644 --- a/modules/audio_coding/neteq/neteq_unittest.cc +++ b/modules/audio_coding/neteq/neteq_unittest.cc @@ -44,36 +44,10 @@ ABSL_FLAG(bool, gen_ref, false, "Generate reference files."); namespace webrtc { -namespace { - -const std::string& PlatformChecksum(const std::string& checksum_general, - const std::string& checksum_android_32, - const std::string& checksum_android_64, - const std::string& checksum_win_32, - const std::string& checksum_win_64) { -#if defined(WEBRTC_ANDROID) -#ifdef WEBRTC_ARCH_64_BITS - return checksum_android_64; -#else - return checksum_android_32; -#endif // WEBRTC_ARCH_64_BITS -#elif defined(WEBRTC_WIN) -#ifdef WEBRTC_ARCH_64_BITS - return checksum_win_64; -#else - return checksum_win_32; -#endif // WEBRTC_ARCH_64_BITS -#else - return checksum_general; -#endif // WEBRTC_WIN -} - -} // namespace - - -#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \ - (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \ - defined(WEBRTC_CODEC_ILBC) && !defined(WEBRTC_ARCH_ARM64) +#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && \ + defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \ + (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \ + defined(WEBRTC_CODEC_ILBC) #define MAYBE_TestBitExactness TestBitExactness #else #define MAYBE_TestBitExactness DISABLED_TestBitExactness @@ -82,79 +56,57 @@ TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) { const std::string input_rtp_file = webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"); - const std::string output_checksum = PlatformChecksum( -// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different -// checksum. -#if defined(WEBRTC_LINUX) && defined(NDEBUG) && defined(WEBRTC_ARCH_X86) - "8d9c177b7f2f9398c0944a851edffae214de2c56", -#else - "6c35140ce4d75874bdd60aa1872400b05fd05ca2", -#endif - "ab451bb8301d9a92fbf4de91556b56f1ea38b4ce", "not used", - "6c35140ce4d75874bdd60aa1872400b05fd05ca2", - "64b46bb3c1165537a880ae8404afce2efba456c0"); - - const std::string network_stats_checksum = PlatformChecksum( -// TODO(bugs.webrtc.org/12941): Linux x86 optimized builds have a different -// checksum. -#if defined(WEBRTC_LINUX) && defined(NDEBUG) && defined(WEBRTC_ARCH_X86) - "8cc08e3cd6801dcba4fcc15eb4036c19296a140d", -#else - "90594d85fa31d3d9584d79293bf7aa4ee55ed751", -#endif - "77b9c3640b81aff6a38d69d07dd782d39c15321d", "not used", - "90594d85fa31d3d9584d79293bf7aa4ee55ed751", - "90594d85fa31d3d9584d79293bf7aa4ee55ed751"); + const std::string output_checksum = + "ba4fae83a52f5e9d95b0910f05d540114285697b"; + + const std::string network_stats_checksum = + "fa878a8464ef1cb3d01503b7f927c3e2ce6f02c4"; DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum, absl::GetFlag(FLAGS_gen_ref)); } -#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \ - defined(WEBRTC_CODEC_OPUS) +#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && \ + defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && defined(WEBRTC_CODEC_OPUS) #define MAYBE_TestOpusBitExactness TestOpusBitExactness #else #define MAYBE_TestOpusBitExactness DISABLED_TestOpusBitExactness #endif -// TODO(http://bugs.webrtc.org/12518): Enable the test after Opus has been -// updated. 
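// [Editor's aside.] The "ssehash|non-ssehash" strings above pack two
// acceptable digests into one expected value, since SSE and non-SSE builds
// produce different but equally valid output. Assuming the comparison simply
// accepts any '|'-separated alternative (a sketch of that convention, not
// the actual test-harness code):

#include <sstream>
#include <string>

bool ChecksumMatches(const std::string& actual, const std::string& allowed) {
  std::istringstream alternatives(allowed);
  std::string candidate;
  while (std::getline(alternatives, candidate, '|')) {
    if (candidate == actual) return true;  // Any listed digest is a pass.
  }
  return false;
}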
-TEST_F(NetEqDecodingTest, DISABLED_TestOpusBitExactness) { +TEST_F(NetEqDecodingTest, MAYBE_TestOpusBitExactness) { const std::string input_rtp_file = webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp"); - const std::string maybe_sse = - "c7887ff60eecf460332c6c7a28c81561f9e8a40f" - "|673dd422cfc174152536d3b13af64f9722520ab5"; - const std::string output_checksum = PlatformChecksum( - maybe_sse, "e39283dd61a89cead3786ef8642d2637cc447296", - "53d8073eb848b70974cba9e26424f4946508fd19", maybe_sse, maybe_sse); + // The checksum depends on SSE being enabled, the second part is the non-SSE + // checksum. + const std::string output_checksum = + "6e23d8827ae54ca352e1448ae363bdfd2878c78e|" + "47cddbf3494b0233f48cb350676e704807237545"; const std::string network_stats_checksum = - PlatformChecksum("c438bfa3b018f77691279eb9c63730569f54585c", - "8a474ed0992591e0c84f593824bb05979c3de157", - "9a05378dbf7e6edd56cdeb8ec45bcd6d8589623c", - "c438bfa3b018f77691279eb9c63730569f54585c", - "c438bfa3b018f77691279eb9c63730569f54585c"); + "f89a9533dbb35a4c449b44c3ed120f7f1c7f90b6"; DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum, absl::GetFlag(FLAGS_gen_ref)); } -// TODO(http://bugs.webrtc.org/12518): Enable the test after Opus has been -// updated. -TEST_F(NetEqDecodingTest, DISABLED_TestOpusDtxBitExactness) { +#if defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && \ + defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && defined(WEBRTC_CODEC_OPUS) +#define MAYBE_TestOpusDtxBitExactness TestOpusDtxBitExactness +#else +#define MAYBE_TestOpusDtxBitExactness DISABLED_TestOpusDtxBitExactness +#endif +TEST_F(NetEqDecodingTest, MAYBE_TestOpusDtxBitExactness) { const std::string input_rtp_file = webrtc::test::ResourcePath("audio_coding/neteq_opus_dtx", "rtp"); - const std::string maybe_sse = - "0fb0a3d6b3758ca6e108368bb777cd38d0a865af" - "|79cfb99a21338ba977eb0e15eb8464e2db9436f8"; - const std::string output_checksum = PlatformChecksum( - maybe_sse, "b6632690f8d7c2340c838df2821fc014f1cc8360", - "f890b9eb9bc5ab8313489230726b297f6a0825af", maybe_sse, maybe_sse); + // The checksum depends on SSE being enabled, the second part is the non-SSE + // checksum. + const std::string output_checksum = + "5cea4a8e750842ac67b79e8e2ce6a0a1c01f8130|" + "e97e32a77355e7ce46a2dc2f43bf1c2805530fcb"; const std::string network_stats_checksum = - "18983bb67a57628c604dbdefa99574c6e0c5bb48"; + "dc8447b9fee1a21fd5d1f4045d62b982a3fb0215"; DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum, absl::GetFlag(FLAGS_gen_ref)); @@ -305,7 +257,7 @@ TEST_F(NetEqDecodingTest, MAYBE_DecoderError) { PopulateRtpInfo(0, 0, &rtp_info); rtp_info.payloadType = 103; // iSAC, but the payload is invalid. EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload)); - // Set all of |out_data_| to 1, and verify that it was set to 0 by the call + // Set all of `out_data_` to 1, and verify that it was set to 0 by the call // to GetAudio. int16_t* out_frame_data = out_frame_.mutable_data(); for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) { @@ -327,7 +279,7 @@ TEST_F(NetEqDecodingTest, MAYBE_DecoderError) { } TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) { - // Set all of |out_data_| to 1, and verify that it was set to 0 by the call + // Set all of `out_data_` to 1, and verify that it was set to 0 by the call // to GetAudio. 
int16_t* out_frame_data = out_frame_.mutable_data(); for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) { @@ -371,7 +323,7 @@ class NetEqBgnTest : public NetEqDecodingTest { AudioFrame output; test::AudioLoop input; // We are using the same 32 kHz input file for all tests, regardless of - // |sampling_rate_hz|. The output may sound weird, but the test is still + // `sampling_rate_hz`. The output may sound weird, but the test is still // valid. ASSERT_TRUE(input.Init( webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"), @@ -384,7 +336,6 @@ class NetEqBgnTest : public NetEqDecodingTest { PopulateRtpInfo(0, 0, &rtp_info); rtp_info.payloadType = payload_type; - uint32_t receive_timestamp = 0; bool muted; for (int n = 0; n < 10; ++n) { // Insert few packets and get audio. auto block = input.GetNextBlock(); @@ -405,8 +356,6 @@ class NetEqBgnTest : public NetEqDecodingTest { rtp_info.timestamp += rtc::checked_cast(expected_samples_per_channel); rtp_info.sequenceNumber++; - receive_timestamp += - rtc::checked_cast(expected_samples_per_channel); } output.Reset(); @@ -534,7 +483,7 @@ TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView( payload, payload_len))); - // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since + // Pull audio until we have played `kCngPeriodMs` of CNG. Start at 10 ms since // we have already pulled out CNG once. for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) { ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); @@ -546,11 +495,16 @@ TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { out_frame_.timestamp_ + out_frame_.samples_per_channel_); } - // Insert speech again. ++seq_no; timestamp += kCngPeriodSamples; - PopulateRtpInfo(seq_no, timestamp, &rtp_info); - ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload)); + uint32_t first_speech_timestamp = timestamp; + // Insert speech again. + for (int i = 0; i < 3; ++i) { + PopulateRtpInfo(seq_no, timestamp, &rtp_info); + ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload)); + ++seq_no; + timestamp += kSamples; + } // Pull audio once and verify that the output is speech again. ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted)); @@ -558,7 +512,7 @@ TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_); absl::optional playout_timestamp = neteq_->GetPlayoutTimestamp(); ASSERT_TRUE(playout_timestamp); - EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples, + EXPECT_EQ(first_speech_timestamp + kSamples - algorithmic_delay_samples, *playout_timestamp); } @@ -1115,186 +1069,5 @@ TEST(NetEqNoTimeStretchingMode, RunTest) { EXPECT_EQ(0, stats.preemptive_rate); } -namespace { -// Helper classes and data types and functions for NetEqOutputDelayTest. - -class VectorAudioSink : public AudioSink { - public: - // Does not take ownership of the vector. 
- VectorAudioSink(std::vector* output_vector) : v_(output_vector) {} - - virtual ~VectorAudioSink() = default; - - bool WriteArray(const int16_t* audio, size_t num_samples) override { - v_->reserve(v_->size() + num_samples); - for (size_t i = 0; i < num_samples; ++i) { - v_->push_back(audio[i]); - } - return true; - } - - private: - std::vector* const v_; -}; - -struct TestResult { - NetEqLifetimeStatistics lifetime_stats; - NetEqNetworkStatistics network_stats; - absl::optional playout_timestamp; - int target_delay_ms; - int filtered_current_delay_ms; - int sample_rate_hz; -}; - -// This class is used as callback object to NetEqTest to collect some stats -// at the end of the simulation. -class SimEndStatsCollector : public NetEqSimulationEndedCallback { - public: - SimEndStatsCollector(TestResult& result) : result_(result) {} - - void SimulationEnded(int64_t /*simulation_time_ms*/, NetEq* neteq) override { - result_.playout_timestamp = neteq->GetPlayoutTimestamp(); - result_.target_delay_ms = neteq->TargetDelayMs(); - result_.filtered_current_delay_ms = neteq->FilteredCurrentDelayMs(); - result_.sample_rate_hz = neteq->last_output_sample_rate_hz(); - } - - private: - TestResult& result_; -}; - -TestResult DelayLineNetEqTest(int delay_ms, - std::vector* output_vector) { - NetEq::Config config; - config.for_test_no_time_stretching = true; - config.extra_output_delay_ms = delay_ms; - auto codecs = NetEqTest::StandardDecoderMap(); - NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = { - {1, kRtpExtensionAudioLevel}, - {3, kRtpExtensionAbsoluteSendTime}, - {5, kRtpExtensionTransportSequenceNumber}, - {7, kRtpExtensionVideoContentType}, - {8, kRtpExtensionVideoTiming}}; - std::unique_ptr input = std::make_unique( - webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp"), - rtp_ext_map, absl::nullopt /*No SSRC filter*/); - std::unique_ptr input_time_limit( - new TimeLimitedNetEqInput(std::move(input), 10000)); - std::unique_ptr output = - std::make_unique(output_vector); - - TestResult result; - SimEndStatsCollector stats_collector(result); - NetEqTest::Callbacks callbacks; - callbacks.simulation_ended_callback = &stats_collector; - - NetEqTest test(config, CreateBuiltinAudioDecoderFactory(), codecs, - /*text_log=*/nullptr, /*neteq_factory=*/nullptr, - /*input=*/std::move(input_time_limit), std::move(output), - callbacks); - test.Run(); - result.lifetime_stats = test.LifetimeStats(); - result.network_stats = test.SimulationStats(); - return result; -} -} // namespace - -// Tests the extra output delay functionality of NetEq. -TEST(NetEqOutputDelayTest, RunTest) { - std::vector output; - const auto result_no_delay = DelayLineNetEqTest(0, &output); - std::vector output_delayed; - constexpr int kDelayMs = 100; - const auto result_delay = DelayLineNetEqTest(kDelayMs, &output_delayed); - - // Verify that the loss concealment remains unchanged. The point of the delay - // is to not affect the jitter buffering behavior. - // First verify that there are concealments in the test. - EXPECT_GT(result_no_delay.lifetime_stats.concealed_samples, 0u); - // And that not all of the output is concealment. - EXPECT_GT(result_no_delay.lifetime_stats.total_samples_received, - result_no_delay.lifetime_stats.concealed_samples); - // Now verify that they remain unchanged by the delay. - EXPECT_EQ(result_no_delay.lifetime_stats.concealed_samples, - result_delay.lifetime_stats.concealed_samples); - // Accelerate and pre-emptive expand should also be unchanged. 
- EXPECT_EQ(result_no_delay.lifetime_stats.inserted_samples_for_deceleration, - result_delay.lifetime_stats.inserted_samples_for_deceleration); - EXPECT_EQ(result_no_delay.lifetime_stats.removed_samples_for_acceleration, - result_delay.lifetime_stats.removed_samples_for_acceleration); - // Verify that delay stats are increased with the delay chain. - EXPECT_EQ( - result_no_delay.lifetime_stats.jitter_buffer_delay_ms + - kDelayMs * result_no_delay.lifetime_stats.jitter_buffer_emitted_count, - result_delay.lifetime_stats.jitter_buffer_delay_ms); - EXPECT_EQ( - result_no_delay.lifetime_stats.jitter_buffer_target_delay_ms + - kDelayMs * result_no_delay.lifetime_stats.jitter_buffer_emitted_count, - result_delay.lifetime_stats.jitter_buffer_target_delay_ms); - EXPECT_EQ(result_no_delay.network_stats.current_buffer_size_ms + kDelayMs, - result_delay.network_stats.current_buffer_size_ms); - EXPECT_EQ(result_no_delay.network_stats.preferred_buffer_size_ms + kDelayMs, - result_delay.network_stats.preferred_buffer_size_ms); - EXPECT_EQ(result_no_delay.network_stats.mean_waiting_time_ms + kDelayMs, - result_delay.network_stats.mean_waiting_time_ms); - EXPECT_EQ(result_no_delay.network_stats.median_waiting_time_ms + kDelayMs, - result_delay.network_stats.median_waiting_time_ms); - EXPECT_EQ(result_no_delay.network_stats.min_waiting_time_ms + kDelayMs, - result_delay.network_stats.min_waiting_time_ms); - EXPECT_EQ(result_no_delay.network_stats.max_waiting_time_ms + kDelayMs, - result_delay.network_stats.max_waiting_time_ms); - - ASSERT_TRUE(result_no_delay.playout_timestamp); - ASSERT_TRUE(result_delay.playout_timestamp); - EXPECT_EQ(*result_no_delay.playout_timestamp - - static_cast( - kDelayMs * - rtc::CheckedDivExact(result_no_delay.sample_rate_hz, 1000)), - *result_delay.playout_timestamp); - EXPECT_EQ(result_no_delay.target_delay_ms + kDelayMs, - result_delay.target_delay_ms); - EXPECT_EQ(result_no_delay.filtered_current_delay_ms + kDelayMs, - result_delay.filtered_current_delay_ms); - - // Verify expected delay in decoded signal. The test vector uses 8 kHz sample - // rate, so the delay will be 8 times the delay in ms. - constexpr size_t kExpectedDelaySamples = kDelayMs * 8; - for (size_t i = 0; - i < output.size() && i + kExpectedDelaySamples < output_delayed.size(); - ++i) { - EXPECT_EQ(output[i], output_delayed[i + kExpectedDelaySamples]); - } -} - -// Tests the extra output delay functionality of NetEq when configured via -// field trial. -TEST(NetEqOutputDelayTest, RunTestWithFieldTrial) { - test::ScopedFieldTrials field_trial( - "WebRTC-Audio-NetEqExtraDelay/Enabled-50/"); - constexpr int kExpectedDelayMs = 50; - std::vector output; - const auto result = DelayLineNetEqTest(0, &output); - - // The base delay values are taken from the resuts of the non-delayed case in - // NetEqOutputDelayTest.RunTest above. - EXPECT_EQ(20 + kExpectedDelayMs, result.target_delay_ms); - EXPECT_EQ(24 + kExpectedDelayMs, result.filtered_current_delay_ms); -} - -// Set a non-multiple-of-10 value in the field trial, and verify that we don't -// crash, and that the result is rounded down. -TEST(NetEqOutputDelayTest, RunTestWithFieldTrialOddValue) { - test::ScopedFieldTrials field_trial( - "WebRTC-Audio-NetEqExtraDelay/Enabled-103/"); - constexpr int kRoundedDelayMs = 100; - std::vector output; - const auto result = DelayLineNetEqTest(0, &output); - - // The base delay values are taken from the resuts of the non-delayed case in - // NetEqOutputDelayTest.RunTest above. 
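// [Editor's aside.] The field-trial test deleted here pinned down the
// documented rounding: a non-multiple-of-10 extra delay such as
// "Enabled-103" was truncated to whole 10 ms output frames before use:

constexpr int RoundDownToOutputFrames(int delay_ms) {
  return (delay_ms / 10) * 10;  // Integer division truncates: 103 -> 100.
}
static_assert(RoundDownToOutputFrames(103) == 100, "rounds down to 100 ms");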
- EXPECT_EQ(20 + kRoundedDelayMs, result.target_delay_ms); - EXPECT_EQ(24 + kRoundedDelayMs, result.filtered_current_delay_ms); -} - } // namespace test } // namespace webrtc diff --git a/modules/audio_coding/neteq/normal.cc b/modules/audio_coding/neteq/normal.cc index 3ed0e26a75..461ee7fa4a 100644 --- a/modules/audio_coding/neteq/normal.cc +++ b/modules/audio_coding/neteq/normal.cc @@ -45,7 +45,7 @@ int Normal::Process(const int16_t* input, const int fs_mult = fs_hz_ / 8000; RTC_DCHECK_GT(fs_mult, 0); // fs_shift = log2(fs_mult), rounded down. - // Note that |fs_shift| is not "exact" for 48 kHz. + // Note that `fs_shift` is not "exact" for 48 kHz. // TODO(hlundin): Investigate this further. const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult); @@ -83,7 +83,7 @@ int Normal::Process(const int16_t* input, size_t energy_length = std::min(static_cast(fs_mult * 64), length_per_channel); int scaling = 6 + fs_shift - WebRtcSpl_NormW32(decoded_max * decoded_max); - scaling = std::max(scaling, 0); // |scaling| should always be >= 0. + scaling = std::max(scaling, 0); // `scaling` should always be >= 0. int32_t energy = WebRtcSpl_DotProductWithScale(signal.get(), signal.get(), energy_length, scaling); int32_t scaled_energy_length = @@ -159,7 +159,7 @@ int Normal::Process(const int16_t* input, if (cng_decoder) { // Generate long enough for 48kHz. - if (!cng_decoder->Generate(cng_output, 0)) { + if (!cng_decoder->Generate(cng_output, false)) { // Error returned; set return vector to all zeros. memset(cng_output, 0, sizeof(cng_output)); } diff --git a/modules/audio_coding/neteq/normal.h b/modules/audio_coding/neteq/normal.h index d6dc84a2d6..772293b605 100644 --- a/modules/audio_coding/neteq/normal.h +++ b/modules/audio_coding/neteq/normal.h @@ -17,7 +17,6 @@ #include "api/neteq/neteq.h" #include "modules/audio_coding/neteq/statistics_calculator.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/numerics/safe_conversions.h" namespace webrtc { @@ -49,11 +48,14 @@ class Normal { virtual ~Normal() {} - // Performs the "Normal" operation. The decoder data is supplied in |input|, - // having |length| samples in total for all channels (interleaved). The - // result is written to |output|. The number of channels allocated in - // |output| defines the number of channels that will be used when - // de-interleaving |input|. |last_mode| contains the mode used in the previous + Normal(const Normal&) = delete; + Normal& operator=(const Normal&) = delete; + + // Performs the "Normal" operation. The decoder data is supplied in `input`, + // having `length` samples in total for all channels (interleaved). The + // result is written to `output`. The number of channels allocated in + // `output` defines the number of channels that will be used when + // de-interleaving `input`. `last_mode` contains the mode used in the previous // GetAudio call (i.e., not the current one). 
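// [Editor's aside.] The fs_shift computed in normal.cc above is
// floor(log2(fs_mult)), obtained via bit normalization. For 48 kHz,
// fs_mult = 6 is not a power of two, hence the "not exact" remark:
// 1 << fs_shift recovers 4, not 6. An equivalent plain loop for positive
// inputs (matching 30 - WebRtcSpl_NormW32(v)):

#include <cstdint>

int FloorLog2(uint32_t v) {
  int shift = -1;
  while (v != 0) {
    v >>= 1;
    ++shift;
  }
  return shift;  // FloorLog2 of 1, 2, 4, 6 gives 0, 1, 2, 2.
}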
int Process(const int16_t* input, size_t length, @@ -68,8 +70,6 @@ class Normal { const size_t samples_per_ms_; const int16_t default_win_slope_Q14_; StatisticsCalculator* const statistics_; - - RTC_DISALLOW_COPY_AND_ASSIGN(Normal); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/normal_unittest.cc b/modules/audio_coding/neteq/normal_unittest.cc index 7e533bb1eb..4554d79576 100644 --- a/modules/audio_coding/neteq/normal_unittest.cc +++ b/modules/audio_coding/neteq/normal_unittest.cc @@ -51,7 +51,7 @@ TEST(Normal, CreateAndDestroy) { StatisticsCalculator statistics; Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels); Normal normal(fs, &db, bgn, &expand, &statistics); - EXPECT_CALL(db, Die()); // Called when |db| goes out of scope. + EXPECT_CALL(db, Die()); // Called when `db` goes out of scope. } TEST(Normal, AvoidDivideByZero) { @@ -85,8 +85,8 @@ TEST(Normal, AvoidDivideByZero) { EXPECT_EQ(input_size_samples, normal.Process(input, input_size_samples, NetEq::Mode::kExpand, &output)); - EXPECT_CALL(db, Die()); // Called when |db| goes out of scope. - EXPECT_CALL(expand, Die()); // Called when |expand| goes out of scope. + EXPECT_CALL(db, Die()); // Called when `db` goes out of scope. + EXPECT_CALL(expand, Die()); // Called when `expand` goes out of scope. } TEST(Normal, InputLengthAndChannelsDoNotMatch) { @@ -109,8 +109,8 @@ TEST(Normal, InputLengthAndChannelsDoNotMatch) { EXPECT_EQ(0, normal.Process(input, input_len, NetEq::Mode::kExpand, &output)); EXPECT_EQ(0u, output.Size()); - EXPECT_CALL(db, Die()); // Called when |db| goes out of scope. - EXPECT_CALL(expand, Die()); // Called when |expand| goes out of scope. + EXPECT_CALL(db, Die()); // Called when `db` goes out of scope. + EXPECT_CALL(expand, Die()); // Called when `expand` goes out of scope. } TEST(Normal, LastModeExpand120msPacket) { @@ -138,8 +138,8 @@ TEST(Normal, LastModeExpand120msPacket) { EXPECT_EQ(kPacketsizeBytes, output.Size()); - EXPECT_CALL(db, Die()); // Called when |db| goes out of scope. - EXPECT_CALL(expand, Die()); // Called when |expand| goes out of scope. + EXPECT_CALL(db, Die()); // Called when `db` goes out of scope. + EXPECT_CALL(expand, Die()); // Called when `expand` goes out of scope. } // TODO(hlundin): Write more tests. diff --git a/modules/audio_coding/neteq/packet.h b/modules/audio_coding/neteq/packet.h index 4455494f98..0c6f204edb 100644 --- a/modules/audio_coding/neteq/packet.h +++ b/modules/audio_coding/neteq/packet.h @@ -84,8 +84,8 @@ struct Packet { // Packets should generally be moved around but sometimes it's useful to make // a copy, for example for testing purposes. NOTE: Will only work for - // un-parsed packets, i.e. |frame| must be unset. The payload will, however, - // be copied. |waiting_time| will also not be copied. + // un-parsed packets, i.e. `frame` must be unset. The payload will, however, + // be copied. `waiting_time` will also not be copied. Packet Clone() const; Packet& operator=(Packet&& b); diff --git a/modules/audio_coding/neteq/packet_buffer.cc b/modules/audio_coding/neteq/packet_buffer.cc index 86ae8475ce..f6b5a476c9 100644 --- a/modules/audio_coding/neteq/packet_buffer.cc +++ b/modules/audio_coding/neteq/packet_buffer.cc @@ -33,7 +33,7 @@ namespace webrtc { namespace { // Predicate used when inserting packets in the buffer list. -// Operator() returns true when |packet| goes before |new_packet|. +// Operator() returns true when `packet` goes before `new_packet`. 
class NewTimestampIsLarger { public: explicit NewTimestampIsLarger(const Packet& new_packet) @@ -183,16 +183,16 @@ int PacketBuffer::InsertPacket(Packet&& packet, PacketList::reverse_iterator rit = std::find_if( buffer_.rbegin(), buffer_.rend(), NewTimestampIsLarger(packet)); - // The new packet is to be inserted to the right of |rit|. If it has the same - // timestamp as |rit|, which has a higher priority, do not insert the new + // The new packet is to be inserted to the right of `rit`. If it has the same + // timestamp as `rit`, which has a higher priority, do not insert the new // packet to list. if (rit != buffer_.rend() && packet.timestamp == rit->timestamp) { LogPacketDiscarded(packet.priority.codec_level, stats); return return_val; } - // The new packet is to be inserted to the left of |it|. If it has the same - // timestamp as |it|, which has a lower priority, replace |it| with the new + // The new packet is to be inserted to the left of `it`. If it has the same + // timestamp as `it`, which has a lower priority, replace `it` with the new // packet. PacketList::iterator it = rit.base(); if (it != buffer_.end() && packet.timestamp == it->timestamp) { diff --git a/modules/audio_coding/neteq/packet_buffer.h b/modules/audio_coding/neteq/packet_buffer.h index cd2adf7111..c6fb47ffbf 100644 --- a/modules/audio_coding/neteq/packet_buffer.h +++ b/modules/audio_coding/neteq/packet_buffer.h @@ -15,7 +15,6 @@ #include "modules/audio_coding/neteq/decoder_database.h" #include "modules/audio_coding/neteq/packet.h" #include "modules/include/module_common_types_public.h" // IsNewerTimestamp -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -45,12 +44,15 @@ class PacketBuffer { }; // Constructor creates a buffer which can hold a maximum of - // |max_number_of_packets| packets. + // `max_number_of_packets` packets. PacketBuffer(size_t max_number_of_packets, const TickTimer* tick_timer); // Deletes all packets in the buffer before destroying the buffer. virtual ~PacketBuffer(); + PacketBuffer(const PacketBuffer&) = delete; + PacketBuffer& operator=(const PacketBuffer&) = delete; + // Flushes the buffer and deletes all packets in it. virtual void Flush(StatisticsCalculator* stats); @@ -63,7 +65,7 @@ class PacketBuffer { // Returns true for an empty buffer. virtual bool Empty() const; - // Inserts |packet| into the buffer. The buffer will take over ownership of + // Inserts `packet` into the buffer. The buffer will take over ownership of // the packet object. // Returns PacketBuffer::kOK on success, PacketBuffer::kFlushed if the buffer // was flushed due to overfilling. @@ -93,14 +95,14 @@ class PacketBuffer { int target_level_ms); // Gets the timestamp for the first packet in the buffer and writes it to the - // output variable |next_timestamp|. + // output variable `next_timestamp`. // Returns PacketBuffer::kBufferEmpty if the buffer is empty, // PacketBuffer::kOK otherwise. virtual int NextTimestamp(uint32_t* next_timestamp) const; // Gets the timestamp for the first packet in the buffer with a timestamp no - // lower than the input limit |timestamp|. The result is written to the output - // variable |next_timestamp|. + // lower than the input limit `timestamp`. The result is written to the output + // variable `next_timestamp`. // Returns PacketBuffer::kBufferEmpty if the buffer is empty, // PacketBuffer::kOK otherwise. 
virtual int NextHigherTimestamp(uint32_t timestamp, @@ -154,11 +156,11 @@ class PacketBuffer { virtual bool ContainsDtxOrCngPacket( const DecoderDatabase* decoder_database) const; - // Static method returning true if |timestamp| is older than |timestamp_limit| - // but less than |horizon_samples| behind |timestamp_limit|. For instance, + // Static method returning true if `timestamp` is older than `timestamp_limit` + // but less than `horizon_samples` behind `timestamp_limit`. For instance, // with timestamp_limit = 100 and horizon_samples = 10, a timestamp in the // range (90, 100) is considered obsolete, and will yield true. - // Setting |horizon_samples| to 0 is the same as setting it to 2^31, i.e., + // Setting `horizon_samples` to 0 is the same as setting it to 2^31, i.e., // half the 32-bit timestamp range. static bool IsObsoleteTimestamp(uint32_t timestamp, uint32_t timestamp_limit, @@ -173,7 +175,6 @@ class PacketBuffer { size_t max_number_of_packets_; PacketList buffer_; const TickTimer* tick_timer_; - RTC_DISALLOW_COPY_AND_ASSIGN(PacketBuffer); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/post_decode_vad.h b/modules/audio_coding/neteq/post_decode_vad.h index ca7cabfd43..3bd91b9edb 100644 --- a/modules/audio_coding/neteq/post_decode_vad.h +++ b/modules/audio_coding/neteq/post_decode_vad.h @@ -16,7 +16,6 @@ #include "api/audio_codecs/audio_decoder.h" #include "common_audio/vad/include/webrtc_vad.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -31,6 +30,9 @@ class PostDecodeVad { virtual ~PostDecodeVad(); + PostDecodeVad(const PostDecodeVad&) = delete; + PostDecodeVad& operator=(const PostDecodeVad&) = delete; + // Enables post-decode VAD. void Enable(); @@ -40,8 +42,8 @@ class PostDecodeVad { // Initializes post-decode VAD. void Init(); - // Updates post-decode VAD with the audio data in |signal| having |length| - // samples. The data is of type |speech_type|, at the sample rate |fs_hz|. + // Updates post-decode VAD with the audio data in `signal` having `length` + // samples. The data is of type `speech_type`, at the sample rate `fs_hz`. void Update(int16_t* signal, size_t length, AudioDecoder::SpeechType speech_type, @@ -63,8 +65,6 @@ class PostDecodeVad { bool active_speech_; int sid_interval_counter_; ::VadInst* vad_instance_; - - RTC_DISALLOW_COPY_AND_ASSIGN(PostDecodeVad); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/preemptive_expand.cc b/modules/audio_coding/neteq/preemptive_expand.cc index cad8d6a50f..232170b177 100644 --- a/modules/audio_coding/neteq/preemptive_expand.cc +++ b/modules/audio_coding/neteq/preemptive_expand.cc @@ -26,7 +26,7 @@ PreemptiveExpand::ReturnCodes PreemptiveExpand::Process( size_t* length_change_samples) { old_data_length_per_channel_ = old_data_length; // Input length must be (almost) 30 ms. - // Also, the new part must be at least |overlap_samples_| elements. + // Also, the new part must be at least `overlap_samples_` elements. static const size_t k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate. if (num_channels_ == 0 || input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_ || @@ -64,7 +64,7 @@ PreemptiveExpand::ReturnCodes PreemptiveExpand::CheckCriteriaAndStretch( bool active_speech, bool /*fast_mode*/, AudioMultiVector* output) const { - // Pre-calculate common multiplication with |fs_mult_|. + // Pre-calculate common multiplication with `fs_mult_`. // 120 corresponds to 15 ms. 
  size_t fs_mult_120 = static_cast<size_t>(fs_mult_ * 120);
   // Check for strong correlation (>0.9 in Q14) and at least 15 ms new data,
@@ -80,12 +80,12 @@ PreemptiveExpand::ReturnCodes PreemptiveExpand::CheckCriteriaAndStretch(
     // Copy first part, including cross-fade region.
     output->PushBackInterleaved(rtc::ArrayView<const int16_t>(
         input, (unmodified_length + peak_index) * num_channels_));
-    // Copy the last |peak_index| samples up to 15 ms to |temp_vector|.
+    // Copy the last `peak_index` samples up to 15 ms to `temp_vector`.
     AudioMultiVector temp_vector(num_channels_);
     temp_vector.PushBackInterleaved(rtc::ArrayView<const int16_t>(
         &input[(unmodified_length - peak_index) * num_channels_],
         peak_index * num_channels_));
-    // Cross-fade |temp_vector| onto the end of |output|.
+    // Cross-fade `temp_vector` onto the end of `output`.
     output->CrossFade(temp_vector, peak_index);
     // Copy the last unmodified part, 15 ms + pitch period until the end.
     output->PushBackInterleaved(rtc::ArrayView<const int16_t>(
diff --git a/modules/audio_coding/neteq/preemptive_expand.h b/modules/audio_coding/neteq/preemptive_expand.h
index e7d2bad94b..6338b993fd 100644
--- a/modules/audio_coding/neteq/preemptive_expand.h
+++ b/modules/audio_coding/neteq/preemptive_expand.h
@@ -15,7 +15,6 @@
 #include <stddef.h>
 #include "modules/audio_coding/neteq/time_stretch.h"
-#include "rtc_base/constructor_magic.h"
 namespace webrtc {
@@ -36,10 +35,13 @@ class PreemptiveExpand : public TimeStretch {
         old_data_length_per_channel_(0),
         overlap_samples_(overlap_samples) {}
+  PreemptiveExpand(const PreemptiveExpand&) = delete;
+  PreemptiveExpand& operator=(const PreemptiveExpand&) = delete;
+
   // This method performs the actual PreemptiveExpand operation. The samples are
-  // read from |input|, of length |input_length| elements, and are written to
-  // |output|. The number of samples added through time-stretching is
-  // is provided in the output |length_change_samples|. The method returns
+  // read from `input`, of length `input_length` elements, and are written to
+  // `output`. The number of samples added through time-stretching is
+  // provided in the output `length_change_samples`. The method returns
   // the outcome of the operation as an enumerator value.
   ReturnCodes Process(const int16_t* pw16_decoded,
                       size_t len,
@@ -48,7 +50,7 @@ class PreemptiveExpand : public TimeStretch {
                       size_t* length_change_samples);
 protected:
-  // Sets the parameters |best_correlation| and |peak_index| to suitable
+  // Sets the parameters `best_correlation` and `peak_index` to suitable
   // values when the signal contains no active speech.
   void SetParametersForPassiveSpeech(size_t input_length,
                                      int16_t* best_correlation,
@@ -67,8 +69,6 @@ class PreemptiveExpand : public TimeStretch {
 private:
   size_t old_data_length_per_channel_;
   size_t overlap_samples_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(PreemptiveExpand);
 };
 struct PreemptiveExpandFactory {
diff --git a/modules/audio_coding/neteq/random_vector.h b/modules/audio_coding/neteq/random_vector.h
index 1d3760055b..4a782f1116 100644
--- a/modules/audio_coding/neteq/random_vector.h
+++ b/modules/audio_coding/neteq/random_vector.h
@@ -14,8 +14,6 @@
 #include <stddef.h>
 #include <stdint.h>
-#include "rtc_base/constructor_magic.h"
-
 namespace webrtc {
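(Aside: the cross-fade that CheckCriteriaAndStretch above applies when stitching the repeated pitch period into the output reduces to a linear ramp between two equal-length segments. A minimal standalone sketch, assuming equal-length inputs and a Q14 ramp like NetEq's fixed-point code; CrossFadeSketch is an illustrative name, not the real AudioMultiVector::CrossFade.)

#include <cstddef>
#include <cstdint>
#include <vector>

// Fades out `a` while fading in `b` over their common length.
std::vector<int16_t> CrossFadeSketch(const std::vector<int16_t>& a,
                                     const std::vector<int16_t>& b) {
  const size_t n = a.size();
  std::vector<int16_t> out(n);
  for (size_t i = 0; i < n; ++i) {
    const int32_t w_b = static_cast<int32_t>((16384 * i) / n);  // 0 -> 1 in Q14.
    const int32_t w_a = 16384 - w_b;                            // 1 -> 0 in Q14.
    out[i] = static_cast<int16_t>((w_a * a[i] + w_b * b[i]) >> 14);
  }
  return out;
}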
 // This class generates pseudo-random samples.
@@ -26,6 +24,9 @@ class RandomVector {
   RandomVector() : seed_(777), seed_increment_(1) {}
+  RandomVector(const RandomVector&) = delete;
+  RandomVector& operator=(const RandomVector&) = delete;
+
   void Reset();
   void Generate(size_t length, int16_t* output);
@@ -39,8 +40,6 @@
 private:
   uint32_t seed_;
   int16_t seed_increment_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(RandomVector);
 };
 } // namespace webrtc
diff --git a/modules/audio_coding/neteq/red_payload_splitter.cc b/modules/audio_coding/neteq/red_payload_splitter.cc
index 2f21a5ff6c..7438f25301 100644
--- a/modules/audio_coding/neteq/red_payload_splitter.cc
+++ b/modules/audio_coding/neteq/red_payload_splitter.cc
@@ -10,7 +10,6 @@
 #include "modules/audio_coding/neteq/red_payload_splitter.h"
-#include <assert.h>
 #include 
 #include 
@@ -28,9 +27,9 @@ namespace webrtc {
 // The method loops through a list of packets {A, B, C, ...}. Each packet is
 // split into its corresponding RED payloads, {A1, A2, ...}, which is
-// temporarily held in the list |new_packets|.
-// When the first packet in |packet_list| has been processed, the original
-// packet is replaced by the new ones in |new_packets|, so that |packet_list|
+// temporarily held in the list `new_packets`.
+// When the first packet in `packet_list` has been processed, the original
+// packet is replaced by the new ones in `new_packets`, so that `packet_list`
 // becomes: {A1, A2, ..., B, C, ...}. The method then continues with B, and C,
 // until all the original packets have been replaced by their split payloads.
 bool RedPayloadSplitter::SplitRed(PacketList* packet_list) {
@@ -111,7 +110,7 @@ bool RedPayloadSplitter::SplitRed(PacketList* packet_list) {
     if (new_headers.size() <= kMaxRedBlocks) {
       // Populate the new packets with payload data.
-      // |payload_ptr| now points at the first payload byte.
+      // `payload_ptr` now points at the first payload byte.
       PacketList new_packets;  // An empty list to store the split packets in.
       for (size_t i = 0; i != new_headers.size(); ++i) {
         const auto& new_header = new_headers[i];
@@ -137,21 +136,21 @@ bool RedPayloadSplitter::SplitRed(PacketList* packet_list) {
             /*ssrc=*/red_packet.packet_info.ssrc(),
             /*csrcs=*/std::vector<uint32_t>(),
             /*rtp_timestamp=*/new_packet.timestamp,
-            /*audio_level=*/absl::nullopt,
+            red_packet.packet_info.audio_level(),
             /*absolute_capture_time=*/absl::nullopt,
             /*receive_time=*/red_packet.packet_info.receive_time());
         new_packets.push_front(std::move(new_packet));
         payload_ptr += payload_length;
       }
       // Insert new packets into original list, before the element pointed to by
-      // iterator |it|.
+      // iterator `it`.
       packet_list->splice(it, std::move(new_packets));
     } else {
       RTC_LOG(LS_WARNING) << "SplitRed too many blocks: " << new_headers.size();
       ret = false;
     }
-    // Remove |it| from the packet list. This operation effectively moves the
-    // iterator |it| to the next packet in the list. Thus, we do not have to
+    // Remove `it` from the packet list. This operation effectively moves the
+    // iterator `it` to the next packet in the list. Thus, we do not have to
     // increment it manually.
     it = packet_list->erase(it);
   }
@@ -176,8 +175,8 @@ void RedPayloadSplitter::CheckRedPayloads(
     } else {
       if (this_payload_type != main_payload_type) {
         // We do not allow redundant payloads of a different type.
-        // Remove |it| from the packet list. This operation effectively
-        // moves the iterator |it| to the next packet in the list. Thus, we
+        // Remove `it` from the packet list. This operation effectively
+        // moves the iterator `it` to the next packet in the list. Thus, we
+        // do not have to increment it manually.
         it = packet_list->erase(it);
         continue;
diff --git a/modules/audio_coding/neteq/red_payload_splitter.h b/modules/audio_coding/neteq/red_payload_splitter.h
index c54ffc0dae..2f48e4b7d4 100644
--- a/modules/audio_coding/neteq/red_payload_splitter.h
+++ b/modules/audio_coding/neteq/red_payload_splitter.h
@@ -12,7 +12,6 @@
 #define MODULES_AUDIO_CODING_NETEQ_RED_PAYLOAD_SPLITTER_H_
 #include "modules/audio_coding/neteq/packet.h"
-#include "rtc_base/constructor_magic.h"
 namespace webrtc {
@@ -30,22 +29,22 @@ class RedPayloadSplitter {
   virtual ~RedPayloadSplitter() {}
-  // Splits each packet in |packet_list| into its separate RED payloads. Each
+  RedPayloadSplitter(const RedPayloadSplitter&) = delete;
+  RedPayloadSplitter& operator=(const RedPayloadSplitter&) = delete;
+
+  // Splits each packet in `packet_list` into its separate RED payloads. Each
   // RED payload is packetized into a Packet. The original elements in
-  // |packet_list| are properly deleted, and replaced by the new packets.
-  // Note that all packets in |packet_list| must be RED payloads, i.e., have
+  // `packet_list` are properly deleted, and replaced by the new packets.
+  // Note that all packets in `packet_list` must be RED payloads, i.e., have
   // RED headers according to RFC 2198 at the very beginning of the payload.
   // Returns kOK or an error.
   virtual bool SplitRed(PacketList* packet_list);
-  // Checks all packets in |packet_list|. Packets that are DTMF events or
+  // Checks all packets in `packet_list`. Packets that are DTMF events or
   // comfort noise payloads are kept. Apart from those, only a single payload
   // type is accepted. Any packet with another payload type is discarded.
   virtual void CheckRedPayloads(PacketList* packet_list,
                                 const DecoderDatabase& decoder_database);
-
- private:
-  RTC_DISALLOW_COPY_AND_ASSIGN(RedPayloadSplitter);
 };
 } // namespace webrtc
diff --git a/modules/audio_coding/neteq/red_payload_splitter_unittest.cc b/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
index 7275232daa..a0ba5414ea 100644
--- a/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
+++ b/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
@@ -12,7 +12,6 @@
 #include "modules/audio_coding/neteq/red_payload_splitter.h"
-#include <assert.h>
 #include 
 #include <utility>  // pair
@@ -71,9 +70,9 @@ void CreateOpusFecPayload(uint8_t* payload,
 // |0| Block PT |
 // +-+-+-+-+-+-+-+-+
-// Creates a RED packet, with |num_payloads| payloads, with payload types given
-// by the values in array |payload_types| (which must be of length
-// |num_payloads|). Each redundant payload is |timestamp_offset| samples
+// Creates a RED packet, with `num_payloads` payloads, with payload types given
+// by the values in array `payload_types` (which must be of length
+// `num_payloads`). Each redundant payload is `timestamp_offset` samples
 // "behind" the previous payload.
 Packet CreateRedPayload(size_t num_payloads,
                         uint8_t* payload_types,
@@ -110,7 +109,7 @@ Packet CreateRedPayload(size_t num_payloads,
     ++payload_ptr;
   }
   for (size_t i = 0; i < num_payloads; ++i) {
-    // Write |i| to all bytes in each payload.
+    // Write `i` to all bytes in each payload.
     if (embed_opus_fec) {
       CreateOpusFecPayload(payload_ptr, kPayloadLength,
                            static_cast<uint8_t>(i));
@@ -122,7 +121,7 @@
   return packet;
 }
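(Aside: the splice-and-erase pattern that SplitRed relies on can be shown in isolation with a std::list<int>; SplitAll and the "two halves" payloads are purely illustrative.)

#include <list>
#include <utility>

// Each "packet" (an int here) is replaced in place by its split payloads.
void SplitAll(std::list<int>& packets) {
  for (auto it = packets.begin(); it != packets.end();) {
    std::list<int> split = {*it * 10, *it * 10 + 1};  // Pretend split result.
    packets.splice(it, std::move(split));  // Insert split payloads before *it.
    it = packets.erase(it);  // Drop the original; erase() advances `it`.
  }
}
// Example: {1, 2} becomes {10, 11, 20, 21}.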
-// Create a packet with all payload bytes set to |payload_value|.
+// Create a packet with all payload bytes set to `payload_value`.
 Packet CreatePacket(uint8_t payload_type,
                     size_t payload_length,
                     uint8_t payload_value,
@@ -141,7 +140,7 @@ Packet CreatePacket(uint8_t payload_type,
   return packet;
 }
-// Checks that |packet| has the attributes given in the remaining parameters.
+// Checks that `packet` has the attributes given in the remaining parameters.
 void VerifyPacket(const Packet& packet,
                   size_t payload_length,
                   uint8_t payload_type,
@@ -290,7 +289,7 @@ TEST(RedPayloadSplitter, TwoPacketsThreePayloads) {
 TEST(RedPayloadSplitter, CheckRedPayloads) {
   PacketList packet_list;
   for (uint8_t i = 0; i <= 3; ++i) {
-    // Create packet with payload type |i|, payload length 10 bytes, all 0.
+    // Create packet with payload type `i`, payload length 10 bytes, all 0.
     packet_list.push_back(CreatePacket(i, 10, 0));
   }
diff --git a/modules/audio_coding/neteq/relative_arrival_delay_tracker.cc b/modules/audio_coding/neteq/relative_arrival_delay_tracker.cc
new file mode 100644
index 0000000000..b50ac80bab
--- /dev/null
+++ b/modules/audio_coding/neteq/relative_arrival_delay_tracker.cc
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/relative_arrival_delay_tracker.h"
+
+#include <algorithm>
+
+#include "modules/include/module_common_types_public.h"
+
+namespace webrtc {
+
+absl::optional<int> RelativeArrivalDelayTracker::Update(uint32_t timestamp,
+                                                        int sample_rate_hz) {
+  if (sample_rate_hz <= 0) {
+    return absl::nullopt;
+  }
+  if (!last_timestamp_) {
+    // Restart relative delay estimation from this packet.
+    delay_history_.clear();
+    packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
+    newest_timestamp_ = timestamp;
+    last_timestamp_ = timestamp;
+    return absl::nullopt;
+  }
+
+  const int expected_iat_ms =
+      1000ll * static_cast<int32_t>(timestamp - *last_timestamp_) /
+      sample_rate_hz;
+  const int iat_ms = packet_iat_stopwatch_->ElapsedMs();
+  const int iat_delay_ms = iat_ms - expected_iat_ms;
+  UpdateDelayHistory(iat_delay_ms, timestamp, sample_rate_hz);
+  int relative_delay = CalculateRelativePacketArrivalDelay();
+
+  packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
+  last_timestamp_ = timestamp;
+  if (IsNewerTimestamp(timestamp, *newest_timestamp_)) {
+    newest_timestamp_ = timestamp;
+  }
+
+  return relative_delay;
+}
+
+void RelativeArrivalDelayTracker::Reset() {
+  delay_history_.clear();
+  packet_iat_stopwatch_.reset();
+  newest_timestamp_ = absl::nullopt;
+  last_timestamp_ = absl::nullopt;
+}
+
+void RelativeArrivalDelayTracker::UpdateDelayHistory(int iat_delay_ms,
+                                                     uint32_t timestamp,
+                                                     int sample_rate_hz) {
+  PacketDelay delay;
+  delay.iat_delay_ms = iat_delay_ms;
+  delay.timestamp = timestamp;
+  delay_history_.push_back(delay);
+  while (static_cast<int32_t>(timestamp - delay_history_.front().timestamp) >
+         max_history_ms_ * sample_rate_hz / 1000) {
+    delay_history_.pop_front();
+  }
+}
+
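(Aside: a worked numeric sketch of the relative-delay accumulation implemented by CalculateRelativePacketArrivalDelay below; RelativeDelay is a hypothetical stand-in. The running sum of inter-arrival delays is clamped at zero, so the reference packet is always the "earliest" one in the window.)

#include <algorithm>
#include <cassert>
#include <initializer_list>

// Accumulates per-packet inter-arrival delays (actual minus expected, in ms),
// clamping at zero exactly like the loop in the real code.
int RelativeDelay(std::initializer_list<int> iat_delays_ms) {
  int relative_delay = 0;
  for (int d : iat_delays_ms) {
    relative_delay = std::max(relative_delay + d, 0);
  }
  return relative_delay;
}

int main() {
  assert(RelativeDelay({40, -20, -20}) == 0);  // 40 ms late, then caught up.
  assert(RelativeDelay({40, -20}) == 20);      // Still 20 ms behind.
  assert(RelativeDelay({-30, 10}) == 10);      // Early packet moves reference.
  return 0;
}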
+int RelativeArrivalDelayTracker::CalculateRelativePacketArrivalDelay() const {
+  // This effectively calculates arrival delay of a packet relative to the
+  // packet preceding the history window. If the arrival delay ever becomes
+  // smaller than zero, it means the reference packet is invalid, and we
+  // move the reference.
+  int relative_delay = 0;
+  for (const PacketDelay& delay : delay_history_) {
+    relative_delay += delay.iat_delay_ms;
+    relative_delay = std::max(relative_delay, 0);
+  }
+  return relative_delay;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/relative_arrival_delay_tracker.h b/modules/audio_coding/neteq/relative_arrival_delay_tracker.h
new file mode 100644
index 0000000000..fed56be2b6
--- /dev/null
+++ b/modules/audio_coding/neteq/relative_arrival_delay_tracker.h
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_RELATIVE_ARRIVAL_DELAY_TRACKER_H_
+#define MODULES_AUDIO_CODING_NETEQ_RELATIVE_ARRIVAL_DELAY_TRACKER_H_
+
+#include <deque>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/neteq/tick_timer.h"
+
+namespace webrtc {
+
+class RelativeArrivalDelayTracker {
+ public:
+  RelativeArrivalDelayTracker(const TickTimer* tick_timer, int max_history_ms)
+      : tick_timer_(tick_timer), max_history_ms_(max_history_ms) {}
+
+  absl::optional<int> Update(uint32_t timestamp, int sample_rate_hz);
+
+  void Reset();
+
+  absl::optional<uint32_t> newest_timestamp() const {
+    return newest_timestamp_;
+  }
+
+ private:
+  // Updates `delay_history_`.
+  void UpdateDelayHistory(int iat_delay_ms,
+                          uint32_t timestamp,
+                          int sample_rate_hz);
+
+  // Calculates relative packet arrival delay from `delay_history_`.
+  int CalculateRelativePacketArrivalDelay() const;
+
+  const TickTimer* tick_timer_;
+  const int max_history_ms_;
+
+  struct PacketDelay {
+    int iat_delay_ms;
+    uint32_t timestamp;
+  };
+  std::deque<PacketDelay> delay_history_;
+
+  absl::optional<uint32_t> newest_timestamp_;
+  absl::optional<uint32_t> last_timestamp_;
+
+  std::unique_ptr<TickTimer::Stopwatch>
+      packet_iat_stopwatch_;  // Time elapsed since last packet.
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_RELATIVE_ARRIVAL_DELAY_TRACKER_H_
diff --git a/modules/audio_coding/neteq/relative_arrival_delay_tracker_unittest.cc b/modules/audio_coding/neteq/relative_arrival_delay_tracker_unittest.cc
new file mode 100644
index 0000000000..b4e9456e62
--- /dev/null
+++ b/modules/audio_coding/neteq/relative_arrival_delay_tracker_unittest.cc
@@ -0,0 +1,84 @@
+/*
+ *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */ + +#include "modules/audio_coding/neteq/relative_arrival_delay_tracker.h" + +#include "test/gtest.h" + +namespace webrtc { + +namespace { +constexpr int kMaxHistoryMs = 2000; +constexpr int kFs = 8000; +constexpr int kFrameSizeMs = 20; +constexpr int kTsIncrement = kFrameSizeMs * kFs / 1000; +constexpr uint32_t kTs = 0x12345678; +} // namespace + +TEST(RelativeArrivalDelayTrackerTest, RelativeArrivalDelay) { + TickTimer tick_timer; + RelativeArrivalDelayTracker tracker(&tick_timer, kMaxHistoryMs); + + EXPECT_FALSE(tracker.Update(kTs, kFs)); + + tick_timer.Increment(kFrameSizeMs / tick_timer.ms_per_tick()); + EXPECT_EQ(tracker.Update(kTs + kTsIncrement, kFs), 0); + + tick_timer.Increment(2 * kFrameSizeMs / tick_timer.ms_per_tick()); + EXPECT_EQ(tracker.Update(kTs + 2 * kTsIncrement, kFs), 20); + + EXPECT_EQ(tracker.Update(kTs, kFs), 60); // Reordered, 60ms delayed. + + tick_timer.Increment(2 * kFrameSizeMs / tick_timer.ms_per_tick()); + EXPECT_EQ(tracker.Update(kTs + 3 * kTsIncrement, kFs), 40); +} + +TEST(RelativeArrivalDelayTrackerTest, ReorderedPackets) { + TickTimer tick_timer; + RelativeArrivalDelayTracker tracker(&tick_timer, kMaxHistoryMs); + + // Insert first packet. + EXPECT_FALSE(tracker.Update(kTs, kFs)); + + // Insert reordered packet. + EXPECT_EQ(tracker.Update(kTs - 4 * kTsIncrement, kFs), 80); + EXPECT_EQ(tracker.newest_timestamp(), kTs); + + // Insert another reordered packet. + EXPECT_EQ(tracker.Update(kTs - kTsIncrement, kFs), 20); + EXPECT_EQ(tracker.newest_timestamp(), kTs); + + // Insert the next packet in order and verify that the relative delay is + // estimated based on the first inserted packet. + tick_timer.Increment(4 * kFrameSizeMs / tick_timer.ms_per_tick()); + EXPECT_EQ(tracker.Update(kTs + kTsIncrement, kFs), 60); + EXPECT_EQ(tracker.newest_timestamp(), kTs + kTsIncrement); +} + +TEST(RelativeArrivalDelayTrackerTest, MaxDelayHistory) { + TickTimer tick_timer; + RelativeArrivalDelayTracker tracker(&tick_timer, kMaxHistoryMs); + + EXPECT_FALSE(tracker.Update(kTs, kFs)); + + // Insert 20 ms iat delay in the delay history. + tick_timer.Increment(2 * kFrameSizeMs / tick_timer.ms_per_tick()); + EXPECT_EQ(tracker.Update(kTs + kTsIncrement, kFs), 20); + + // Insert next packet with a timestamp difference larger than maximum history + // size. This removes the previously inserted iat delay from the history. + tick_timer.Increment((kMaxHistoryMs + kFrameSizeMs) / + tick_timer.ms_per_tick()); + EXPECT_EQ( + tracker.Update(kTs + 2 * kTsIncrement + kFs * kMaxHistoryMs / 1000, kFs), + 0); +} + +} // namespace webrtc diff --git a/modules/audio_coding/neteq/reorder_optimizer.cc b/modules/audio_coding/neteq/reorder_optimizer.cc new file mode 100644 index 0000000000..f6e073fc88 --- /dev/null +++ b/modules/audio_coding/neteq/reorder_optimizer.cc @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "modules/audio_coding/neteq/reorder_optimizer.h"
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDelayBuckets = 100;
+constexpr int kBucketSizeMs = 20;
+
+}  // namespace
+
+ReorderOptimizer::ReorderOptimizer(int forget_factor,
+                                   int ms_per_loss_percent,
+                                   absl::optional<double> start_forget_weight)
+    : histogram_(kDelayBuckets, forget_factor, start_forget_weight),
+      ms_per_loss_percent_(ms_per_loss_percent) {}
+
+void ReorderOptimizer::Update(int relative_delay_ms,
+                              bool reordered,
+                              int base_delay_ms) {
+  const int index = reordered ? relative_delay_ms / kBucketSizeMs : 0;
+  if (index < histogram_.NumBuckets()) {
+    // Maximum delay to register is 2000 ms.
+    histogram_.Add(index);
+  }
+  int bucket_index = MinimizeCostFunction(base_delay_ms);
+  optimal_delay_ms_ = (1 + bucket_index) * kBucketSizeMs;
+}
+
+void ReorderOptimizer::Reset() {
+  histogram_.Reset();
+  optimal_delay_ms_.reset();
+}
+
+int ReorderOptimizer::MinimizeCostFunction(int base_delay_ms) const {
+  const std::vector<int>& buckets = histogram_.buckets();
+
+  // Values are calculated in Q30.
+  int64_t loss_probability = 1 << 30;
+  int64_t min_cost = std::numeric_limits<int64_t>::max();
+  int min_bucket = 0;
+  for (int i = 0; i < static_cast<int>(buckets.size()); ++i) {
+    loss_probability -= buckets[i];
+    int64_t delay_ms =
+        static_cast<int64_t>(std::max(0, i * kBucketSizeMs - base_delay_ms))
+        << 30;
+    int64_t cost = delay_ms + 100 * ms_per_loss_percent_ * loss_probability;
+
+    if (cost < min_cost) {
+      min_cost = cost;
+      min_bucket = i;
+    }
+    if (loss_probability == 0) {
+      break;
+    }
+  }
+
+  return min_bucket;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/reorder_optimizer.h b/modules/audio_coding/neteq/reorder_optimizer.h
new file mode 100644
index 0000000000..06f6bc7e50
--- /dev/null
+++ b/modules/audio_coding/neteq/reorder_optimizer.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_REORDER_OPTIMIZER_H_
+#define MODULES_AUDIO_CODING_NETEQ_REORDER_OPTIMIZER_H_
+
+#include "absl/types/optional.h"
+#include "modules/audio_coding/neteq/histogram.h"
+
+namespace webrtc {
+
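(Aside: a worked sketch of the Q30 cost minimization in MinimizeCostFunction above, redone in floating point for readability; BestBucket and its parameters are illustrative, and the 100 * ms_per_loss_percent * P(loss) term mirrors the code above.)

#include <algorithm>
#include <cstddef>
#include <vector>

// Picks the bucket minimizing delay_ms + 100 * ms_per_loss_percent * P(loss),
// where P(loss) is the probability mass above the candidate bucket.
size_t BestBucket(const std::vector<double>& bucket_probs,
                  int bucket_size_ms,
                  int ms_per_loss_percent,
                  int base_delay_ms) {
  double loss_probability = 1.0;
  double min_cost = 1e300;
  size_t best = 0;
  for (size_t i = 0; i < bucket_probs.size(); ++i) {
    loss_probability -= bucket_probs[i];
    const double delay_ms =
        std::max(0, static_cast<int>(i) * bucket_size_ms - base_delay_ms);
    const double cost =
        delay_ms + 100.0 * ms_per_loss_percent * loss_probability;
    if (cost < min_cost) {
      min_cost = cost;
      best = i;
    }
    if (loss_probability <= 0.0) break;
  }
  return best;
}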
+// Calculates an optimal delay to reduce the chance of missing reordered
+// packets. The delay/loss trade-off can be tuned using the
+// `ms_per_loss_percent` parameter.
+class ReorderOptimizer {
+ public:
+  ReorderOptimizer(int forget_factor,
+                   int ms_per_loss_percent,
+                   absl::optional<double> start_forget_weight);
+
+  void Update(int relative_delay_ms, bool reordered, int base_delay_ms);
+
+  absl::optional<int> GetOptimalDelayMs() const { return optimal_delay_ms_; }
+
+  void Reset();
+
+ private:
+  int MinimizeCostFunction(int base_delay_ms) const;
+
+  Histogram histogram_;
+  const int ms_per_loss_percent_;
+  absl::optional<int> optimal_delay_ms_;
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_REORDER_OPTIMIZER_H_
diff --git a/modules/audio_coding/neteq/reorder_optimizer_unittest.cc b/modules/audio_coding/neteq/reorder_optimizer_unittest.cc
new file mode 100644
index 0000000000..aaa1062560
--- /dev/null
+++ b/modules/audio_coding/neteq/reorder_optimizer_unittest.cc
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/reorder_optimizer.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kForgetFactor = 32745;  // 0.9993 in Q15.
+constexpr int kMsPerLossPercent = 20;
+constexpr int kStartForgetWeight = 1;
+
+}  // namespace
+
+TEST(ReorderOptimizerTest, OnlyIncreaseDelayForReorderedPackets) {
+  ReorderOptimizer reorder_optimizer(kForgetFactor, kMsPerLossPercent,
+                                     kStartForgetWeight);
+  EXPECT_FALSE(reorder_optimizer.GetOptimalDelayMs());
+
+  // Delay should not increase for in-order packets.
+  reorder_optimizer.Update(60, /*reordered=*/false, 0);
+  EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 20);
+
+  reorder_optimizer.Update(100, /*reordered=*/false, 0);
+  EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 20);
+
+  reorder_optimizer.Update(80, /*reordered=*/true, 0);
+  EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 100);
+}
+
+TEST(ReorderOptimizerTest, AvoidIncreasingDelayWhenProbabilityIsLow) {
+  ReorderOptimizer reorder_optimizer(kForgetFactor, kMsPerLossPercent,
+                                     kStartForgetWeight);
+
+  reorder_optimizer.Update(40, /*reordered=*/true, 0);
+  reorder_optimizer.Update(40, /*reordered=*/true, 0);
+  reorder_optimizer.Update(40, /*reordered=*/true, 0);
+  EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 60);
+
+  // The cost of the delay is too high relative to the probability.
+  reorder_optimizer.Update(600, /*reordered=*/true, 0);
+  EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 60);
+}
+
+TEST(ReorderOptimizerTest, BaseDelayIsSubtractedFromCost) {
+  constexpr int kBaseDelayMs = 200;
+  ReorderOptimizer reorder_optimizer(kForgetFactor, kMsPerLossPercent,
+                                     kStartForgetWeight);
+
+  reorder_optimizer.Update(40, /*reordered=*/true, kBaseDelayMs);
+  reorder_optimizer.Update(40, /*reordered=*/true, kBaseDelayMs);
+  reorder_optimizer.Update(40, /*reordered=*/true, kBaseDelayMs);
+  EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 60);
+
+  // The cost of the delay is too high relative to the probability.
+  reorder_optimizer.Update(600, /*reordered=*/true, kBaseDelayMs);
+  EXPECT_EQ(reorder_optimizer.GetOptimalDelayMs(), 620);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/statistics_calculator.cc b/modules/audio_coding/neteq/statistics_calculator.cc
index 12a0e3c9ec..8e281302b7 100644
--- a/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/modules/audio_coding/neteq/statistics_calculator.cc
@@ -10,7 +10,6 @@
 #include "modules/audio_coding/neteq/statistics_calculator.h"
-#include <assert.h>
 #include <string.h>  // memset
 #include <algorithm>
@@ -340,7 +339,7 @@ void StatisticsCalculator::GetNetworkStatistics(size_t samples_per_packet,
   } else {
     std::sort(waiting_times_.begin(), waiting_times_.end());
     // Find mid-point elements. If the size is odd, the two values
-    // |middle_left| and |middle_right| will both be the one middle element; if
+    // `middle_left` and `middle_right` will both be the one middle element; if
     // the size is even, they will be the two neighboring elements at the
     // middle of the list.
     const int middle_left = waiting_times_[(waiting_times_.size() - 1) / 2];
diff --git a/modules/audio_coding/neteq/statistics_calculator.h b/modules/audio_coding/neteq/statistics_calculator.h
index f0c2734423..269e6a09b2 100644
--- a/modules/audio_coding/neteq/statistics_calculator.h
+++ b/modules/audio_coding/neteq/statistics_calculator.h
@@ -15,7 +15,6 @@
 #include 
 #include "api/neteq/neteq.h"
-#include "rtc_base/constructor_magic.h"
 namespace webrtc {
@@ -28,22 +27,25 @@ class StatisticsCalculator {
   virtual ~StatisticsCalculator();
+  StatisticsCalculator(const StatisticsCalculator&) = delete;
+  StatisticsCalculator& operator=(const StatisticsCalculator&) = delete;
+
   // Resets most of the counters.
   void Reset();
   // Resets the counters that are not handled by Reset().
   void ResetMcu();
-  // Reports that |num_samples| samples were produced through expansion, and
+  // Reports that `num_samples` samples were produced through expansion, and
   // that the expansion produced other than just noise samples.
   void ExpandedVoiceSamples(size_t num_samples, bool is_new_concealment_event);
-  // Reports that |num_samples| samples were produced through expansion, and
+  // Reports that `num_samples` samples were produced through expansion, and
   // that the expansion produced only noise samples.
   void ExpandedNoiseSamples(size_t num_samples, bool is_new_concealment_event);
   // Corrects the statistics for number of samples produced through non-noise
-  // expansion by adding |num_samples| (negative or positive) to the current
+  // expansion by adding `num_samples` (negative or positive) to the current
   // value. The result is capped to zero to avoid negative values.
   void ExpandedVoiceSamplesCorrection(int num_samples);
@@ -55,24 +57,24 @@ class StatisticsCalculator {
   // Mark end of expand event; triggers some stats to be reported.
   void EndExpandEvent(int fs_hz);
-  // Reports that |num_samples| samples were produced through preemptive
+  // Reports that `num_samples` samples were produced through preemptive
   // expansion.
   void PreemptiveExpandedSamples(size_t num_samples);
-  // Reports that |num_samples| samples were removed through accelerate.
+  // Reports that `num_samples` samples were removed through accelerate.
   void AcceleratedSamples(size_t num_samples);
-  // Reports that |num_packets| packets were discarded.
+  // Reports that `num_packets` packets were discarded.
   virtual void PacketsDiscarded(size_t num_packets);
-  // Reports that |num_packets| secondary (FEC) packets were discarded.
+  // Reports that `num_packets` secondary (FEC) packets were discarded.
   virtual void SecondaryPacketsDiscarded(size_t num_packets);
-  // Reports that |num_packets| secondary (FEC) packets were received.
+  // Reports that `num_packets` secondary (FEC) packets were received.
   virtual void SecondaryPacketsReceived(size_t num_packets);
-  // Increases the report interval counter with |num_samples| at a sample rate
-  // of |fs_hz|. This is how the StatisticsCalculator gets notified that current
+  // Increases the report interval counter with `num_samples` at a sample rate
+  // of `fs_hz`. This is how the StatisticsCalculator gets notified that current
   // time is increasing.
   void IncreaseCounter(size_t num_samples, int fs_hz);
@@ -84,7 +86,7 @@ class StatisticsCalculator {
   // Stores new packet waiting time in waiting time statistics.
   void StoreWaitingTime(int waiting_time_ms);
-  // Reports that |num_samples| samples were decoded from secondary packets.
+  // Reports that `num_samples` samples were decoded from secondary packets.
   void SecondaryDecodedSamples(int num_samples);
   // Reports that the packet buffer was flushed.
@@ -93,17 +95,17 @@ class StatisticsCalculator {
   // Reports that the jitter buffer received a packet.
   void ReceivedPacket();
-  // Reports that a received packet was delayed by |delay_ms| milliseconds.
+  // Reports that a received packet was delayed by `delay_ms` milliseconds.
   virtual void RelativePacketArrivalDelay(size_t delay_ms);
-  // Logs a delayed packet outage event of |num_samples| expanded at a sample
-  // rate of |fs_hz|. A delayed packet outage event is defined as an expand
+  // Logs a delayed packet outage event of `num_samples` expanded at a sample
+  // rate of `fs_hz`. A delayed packet outage event is defined as an expand
   // period caused not by an actual packet loss, but by a delayed packet.
   virtual void LogDelayedPacketOutageEvent(int num_samples, int fs_hz);
-  // Returns the current network statistics in |stats|. The number of samples
-  // per packet is |samples_per_packet|. The method does not populate
-  // |preferred_buffer_size_ms|, |jitter_peaks_found| or |clockdrift_ppm|; use
+  // Returns the current network statistics in `stats`. The number of samples
+  // per packet is `samples_per_packet`. The method does not populate
+  // `preferred_buffer_size_ms`, `jitter_peaks_found` or `clockdrift_ppm`; use
   // the PopulateDelayManagerStats method for those.
   void GetNetworkStatistics(size_t samples_per_packet,
                             NetEqNetworkStatistics* stats);
@@ -197,8 +199,6 @@ class StatisticsCalculator {
   PeriodicUmaAverage excess_buffer_delay_;
   PeriodicUmaCount buffer_full_counter_;
   bool decoded_output_played_ = false;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(StatisticsCalculator);
 };
 } // namespace webrtc
diff --git a/modules/audio_coding/neteq/statistics_calculator_unittest.cc b/modules/audio_coding/neteq/statistics_calculator_unittest.cc
index e6a9c26c4b..a15cac4ebf 100644
--- a/modules/audio_coding/neteq/statistics_calculator_unittest.cc
+++ b/modules/audio_coding/neteq/statistics_calculator_unittest.cc
@@ -179,4 +179,37 @@ TEST(StatisticsCalculator, InterruptionCounterDoNotLogBeforeDecoding) {
   EXPECT_EQ(1, lts.interruption_count);
 }
+// Test that `discarded_primary_packets`, as reported by
+// `GetOperationsAndState`, always matches the accumulated arguments to
+// `PacketsDiscarded`.
+TEST(StatisticsCalculator, DiscardedPackets) {
+  StatisticsCalculator statistics_calculator;
+  EXPECT_EQ(
+      0u,
+      statistics_calculator.GetOperationsAndState().discarded_primary_packets);
+
+  statistics_calculator.PacketsDiscarded(1);
+  EXPECT_EQ(
+      1u,
+      statistics_calculator.GetOperationsAndState().discarded_primary_packets);
+
+  statistics_calculator.PacketsDiscarded(10);
+  EXPECT_EQ(
+      11u,
+      statistics_calculator.GetOperationsAndState().discarded_primary_packets);
+
+  // Calling `SecondaryPacketsDiscarded` does not modify
+  // `discarded_primary_packets`.
+  statistics_calculator.SecondaryPacketsDiscarded(1);
+  EXPECT_EQ(
+      11u,
+      statistics_calculator.GetOperationsAndState().discarded_primary_packets);
+
+  // Calling `FlushedPacketBuffer` does not modify `discarded_primary_packets`.
+  statistics_calculator.FlushedPacketBuffer();
+  EXPECT_EQ(
+      11u,
+      statistics_calculator.GetOperationsAndState().discarded_primary_packets);
+}
+
 } // namespace webrtc
diff --git a/modules/audio_coding/neteq/sync_buffer.cc b/modules/audio_coding/neteq/sync_buffer.cc
index 73e0628ea6..7d7cac7157 100644
--- a/modules/audio_coding/neteq/sync_buffer.cc
+++ b/modules/audio_coding/neteq/sync_buffer.cc
@@ -28,7 +28,7 @@ void SyncBuffer::PushBack(const AudioMultiVector& append_this) {
     next_index_ -= samples_added;
   } else {
     // This means that we are pushing out future data that was never used.
-    // RTC_NOTREACHED();
+    // RTC_DCHECK_NOTREACHED();
     // TODO(hlundin): This assert must be disabled to support 60 ms frames.
     // This should not happen even for 60 ms frames, but it does. Investigate
    // why.
@@ -59,11 +59,11 @@ void SyncBuffer::InsertZerosAtIndex(size_t length, size_t position) {
     channels_[channel]->InsertZerosAt(length, position);
   }
   if (next_index_ >= position) {
-    // We are moving the |next_index_| sample.
+    // We are moving the `next_index_` sample.
     set_next_index(next_index_ + length);  // Overflow handled by subfunction.
   }
   if (dtmf_index_ > 0 && dtmf_index_ >= position) {
-    // We are moving the |dtmf_index_| sample.
+    // We are moving the `dtmf_index_` sample.
     set_dtmf_index(dtmf_index_ + length);  // Overflow handled by subfunction.
   }
 }
@@ -71,7 +71,7 @@ void SyncBuffer::ReplaceAtIndex(const AudioMultiVector& insert_this,
                                 size_t length,
                                 size_t position) {
-  position = std::min(position, Size());  // Cap |position| in the valid range.
+  position = std::min(position, Size());  // Cap `position` in the valid range.
   length = std::min(length, Size() - position);
   AudioMultiVector::OverwriteAt(insert_this, length, position);
 }
@@ -106,12 +106,12 @@ void SyncBuffer::Flush() {
 }
 void SyncBuffer::set_next_index(size_t value) {
-  // Cannot set |next_index_| larger than the size of the buffer.
+  // Cannot set `next_index_` larger than the size of the buffer.
   next_index_ = std::min(value, Size());
 }
 void SyncBuffer::set_dtmf_index(size_t value) {
-  // Cannot set |dtmf_index_| larger than the size of the buffer.
+  // Cannot set `dtmf_index_` larger than the size of the buffer.
  dtmf_index_ = std::min(value, Size());
 }
diff --git a/modules/audio_coding/neteq/sync_buffer.h b/modules/audio_coding/neteq/sync_buffer.h
index 754716be49..cf56c432e3 100644
--- a/modules/audio_coding/neteq/sync_buffer.h
+++ b/modules/audio_coding/neteq/sync_buffer.h
@@ -20,7 +20,6 @@
 #include "modules/audio_coding/neteq/audio_multi_vector.h"
 #include "modules/audio_coding/neteq/audio_vector.h"
 #include "rtc_base/buffer.h"
-#include "rtc_base/constructor_magic.h"
 namespace webrtc {
@@ -32,58 +31,61 @@ class SyncBuffer : public AudioMultiVector {
         end_timestamp_(0),
         dtmf_index_(0) {}
+  SyncBuffer(const SyncBuffer&) = delete;
+  SyncBuffer& operator=(const SyncBuffer&) = delete;
+
   // Returns the number of samples yet to play out from the buffer.
   size_t FutureLength() const;
-  // Adds the contents of |append_this| to the back of the SyncBuffer. Removes
+  // Adds the contents of `append_this` to the back of the SyncBuffer. Removes
   // the same number of samples from the beginning of the SyncBuffer, to
-  // maintain a constant buffer size. The |next_index_| is updated to reflect
+  // maintain a constant buffer size. The `next_index_` is updated to reflect
   // the move of the beginning of "future" data.
   void PushBack(const AudioMultiVector& append_this) override;
   // Like PushBack, but reads the samples channel-interleaved from the input.
   void PushBackInterleaved(const rtc::BufferT<int16_t>& append_this);
-  // Adds |length| zeros to the beginning of each channel. Removes
+  // Adds `length` zeros to the beginning of each channel. Removes
   // the same number of samples from the end of the SyncBuffer, to
-  // maintain a constant buffer size. The |next_index_| is updated to reflect
+  // maintain a constant buffer size. The `next_index_` is updated to reflect
   // the move of the beginning of "future" data.
   // Note that this operation may delete future samples that are waiting to
   // be played.
   void PushFrontZeros(size_t length);
-  // Inserts |length| zeros into each channel at index |position|. The size of
-  // the SyncBuffer is kept constant, which means that the last |length|
+  // Inserts `length` zeros into each channel at index `position`. The size of
+  // the SyncBuffer is kept constant, which means that the last `length`
   // elements in each channel will be purged.
   virtual void InsertZerosAtIndex(size_t length, size_t position);
   // Overwrites each channel in this SyncBuffer with values taken from
-  // |insert_this|. The values are taken from the beginning of |insert_this| and
-  // are inserted starting at |position|. |length| values are written into each
-  // channel. The size of the SyncBuffer is kept constant. That is, if |length|
-  // and |position| are selected such that the new data would extend beyond the
+  // `insert_this`. The values are taken from the beginning of `insert_this` and
+  // are inserted starting at `position`. `length` values are written into each
+  // channel. The size of the SyncBuffer is kept constant. That is, if `length`
+  // and `position` are selected such that the new data would extend beyond the
   // end of the current SyncBuffer, the buffer is not extended.
-  // The |next_index_| is not updated.
+  // The `next_index_` is not updated.
   virtual void ReplaceAtIndex(const AudioMultiVector& insert_this,
                               size_t length,
                               size_t position);
-  // Same as the above method, but where all of |insert_this| is written (with
+  // Same as the above method, but where all of `insert_this` is written (with
   // the same constraints as above, that the SyncBuffer is not extended).
  virtual void ReplaceAtIndex(const AudioMultiVector& insert_this,
                              size_t position);
-  // Reads |requested_len| samples from each channel and writes them interleaved
-  // into |output|. The |next_index_| is updated to point to the sample to read
-  // next time. The AudioFrame |output| is first reset, and the |data_|,
-  // |num_channels_|, and |samples_per_channel_| fields are updated.
+  // Reads `requested_len` samples from each channel and writes them interleaved
+  // into `output`. The `next_index_` is updated to point to the sample to read
+  // next time. The AudioFrame `output` is first reset, and the `data_`,
+  // `num_channels_`, and `samples_per_channel_` fields are updated.
   void GetNextAudioInterleaved(size_t requested_len, AudioFrame* output);
-  // Adds |increment| to |end_timestamp_|.
+  // Adds `increment` to `end_timestamp_`.
   void IncreaseEndTimestamp(uint32_t increment);
   // Flushes the buffer. The buffer will contain only zeros after the flush, and
-  // |next_index_| will point to the end, like when the buffer was first
+  // `next_index_` will point to the end, like when the buffer was first
   // created.
   void Flush();
@@ -102,8 +104,6 @@ class SyncBuffer : public AudioMultiVector {
   size_t next_index_;
   uint32_t end_timestamp_;  // The timestamp of the last sample in the buffer.
   size_t dtmf_index_;  // Index to the first non-DTMF sample in the buffer.
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(SyncBuffer);
 };
 } // namespace webrtc
diff --git a/modules/audio_coding/neteq/sync_buffer_unittest.cc b/modules/audio_coding/neteq/sync_buffer_unittest.cc
index 860dbae964..bdcd92446b 100644
--- a/modules/audio_coding/neteq/sync_buffer_unittest.cc
+++ b/modules/audio_coding/neteq/sync_buffer_unittest.cc
@@ -55,18 +55,18 @@ TEST(SyncBuffer, PushBackAndFlush) {
   SyncBuffer sync_buffer(kChannels, kLen);
   static const size_t kNewLen = 10;
   AudioMultiVector new_data(kChannels, kNewLen);
-  // Populate |new_data|.
+  // Populate `new_data`.
   for (size_t channel = 0; channel < kChannels; ++channel) {
     for (size_t i = 0; i < kNewLen; ++i) {
       new_data[channel][i] = rtc::checked_cast<int16_t>(i);
     }
   }
-  // Push back |new_data| into |sync_buffer|. This operation should pop out
-  // data from the front of |sync_buffer|, so that the size of the buffer
-  // remains the same. The |next_index_| should also move with the same length.
+  // Push back `new_data` into `sync_buffer`. This operation should pop out
+  // data from the front of `sync_buffer`, so that the size of the buffer
+  // remains the same. The `next_index_` should also move with the same length.
   sync_buffer.PushBack(new_data);
   ASSERT_EQ(kLen, sync_buffer.Size());
-  // Verify that |next_index_| moved accordingly.
+  // Verify that `next_index_` moved accordingly.
   EXPECT_EQ(kLen - kNewLen, sync_buffer.next_index());
   // Verify the new contents.
   for (size_t channel = 0; channel < kChannels; ++channel) {
@@ -95,7 +95,7 @@ TEST(SyncBuffer, PushFrontZeros) {
   SyncBuffer sync_buffer(kChannels, kLen);
   static const size_t kNewLen = 10;
   AudioMultiVector new_data(kChannels, kNewLen);
-  // Populate |new_data|.
+  // Populate `new_data`.
   for (size_t channel = 0; channel < kChannels; ++channel) {
     for (size_t i = 0; i < kNewLen; ++i) {
       new_data[channel][i] = rtc::checked_cast<int16_t>(1000 + i);
     }
   }
   sync_buffer.PushBack(new_data);
   EXPECT_EQ(kLen, sync_buffer.Size());
-  // Push |kNewLen| - 1 zeros into each channel in the front of the SyncBuffer.
+  // Push `kNewLen` - 1 zeros into each channel in the front of the SyncBuffer.
  sync_buffer.PushFrontZeros(kNewLen - 1);
  EXPECT_EQ(kLen, sync_buffer.Size());  // Size should remain the same.
-  // Verify that |next_index_| moved accordingly. Should be at the end - 1.
+  // Verify that `next_index_` moved accordingly. Should be at the end - 1.
  EXPECT_EQ(kLen - 1, sync_buffer.next_index());
  // Verify the zeros.
  for (size_t channel = 0; channel < kChannels; ++channel) {
@@ -128,22 +128,22 @@ TEST(SyncBuffer, GetNextAudioInterleaved) {
  SyncBuffer sync_buffer(kChannels, kLen);
  static const size_t kNewLen = 10;
  AudioMultiVector new_data(kChannels, kNewLen);
-  // Populate |new_data|.
+  // Populate `new_data`.
  for (size_t channel = 0; channel < kChannels; ++channel) {
    for (size_t i = 0; i < kNewLen; ++i) {
      new_data[channel][i] = rtc::checked_cast<int16_t>(i);
    }
  }
-  // Push back |new_data| into |sync_buffer|. This operation should pop out
-  // data from the front of |sync_buffer|, so that the size of the buffer
-  // remains the same. The |next_index_| should also move with the same length.
+  // Push back `new_data` into `sync_buffer`. This operation should pop out
+  // data from the front of `sync_buffer`, so that the size of the buffer
+  // remains the same. The `next_index_` should also move with the same length.
  sync_buffer.PushBack(new_data);
  // Read to interleaved output. Read in two batches, where each read operation
-  // should automatically update the |net_index_| in the SyncBuffer.
-  // Note that |samples_read| is the number of samples read from each channel.
-  // That is, the number of samples written to |output| is
-  // |samples_read| * |kChannels|.
+  // should automatically update the `next_index_` in the SyncBuffer.
+  // Note that `samples_read` is the number of samples read from each channel.
+  // That is, the number of samples written to `output` is
+  // `samples_read` * `kChannels`.
  AudioFrame output1;
  sync_buffer.GetNextAudioInterleaved(kNewLen / 2, &output1);
  EXPECT_EQ(kChannels, output1.num_channels_);
diff --git a/modules/audio_coding/neteq/test/neteq_decoding_test.cc b/modules/audio_coding/neteq/test/neteq_decoding_test.cc
index 11b1b1a532..6f27cdad4f 100644
--- a/modules/audio_coding/neteq/test/neteq_decoding_test.cc
+++ b/modules/audio_coding/neteq/test/neteq_decoding_test.cc
@@ -193,7 +193,7 @@ void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
  rtp_info->timestamp = timestamp;
  rtp_info->ssrc = 0x1234;      // Just an arbitrary SSRC.
  rtp_info->payloadType = 94;   // PCM16b WB codec.
-  rtp_info->markerBit = 0;
+  rtp_info->markerBit = false;
 }
 void NetEqDecodingTest::PopulateCng(int frame_index,
@@ -205,7 +205,7 @@
  rtp_info->timestamp = timestamp;
  rtp_info->ssrc = 0x1234;     // Just an arbitrary SSRC.
  rtp_info->payloadType = 98;  // WB CNG.
-  rtp_info->markerBit = 0;
+  rtp_info->markerBit = false;
  payload[0] = 64;   // Noise level -64 dBov, quite arbitrarily chosen.
  *payload_len = 1;  // Only noise level, no spectral parameters.
 }
@@ -222,7 +222,6 @@ void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
  const int kSamples = kBlockSize16kHz * kBlocksPerFrame;
  const size_t kPayloadBytes = kSamples * sizeof(int16_t);
  double next_input_time_ms = 0.0;
-  uint32_t receive_timestamp = 0;
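(Aside: WrapTest below exercises sequence-number and timestamp wrap-around. A minimal sketch of the wrap-safe "is newer" comparison in the style of IsNewerTimestamp from module_common_types_public.h; IsNewer is an illustrative stand-in.)

#include <cassert>
#include <cstdint>

// t1 is newer than t2 if it is less than half the 32-bit range ahead of t2.
bool IsNewer(uint32_t t1, uint32_t t2) {
  return t1 != t2 && static_cast<uint32_t>(t1 - t2) < 0x80000000u;
}

int main() {
  assert(IsNewer(100, 90));
  assert(IsNewer(5, 0xFFFFFFF0u));   // Still newer across the wrap.
  assert(!IsNewer(0xFFFFFFF0u, 5));
  return 0;
}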
  // Insert speech for 2 seconds.
  const int kSpeechDurationMs = 2000;
@@ -246,21 +245,14 @@
      NetEqNetworkStatistics network_stats;
      ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
-      // Due to internal NetEq logic, preferred buffer-size is about 4 times the
-      // packet size for first few packets. Therefore we refrain from checking
-      // the criteria.
-      if (packets_inserted > 4) {
-        // Expect preferred and actual buffer size to be no more than 2 frames.
-        EXPECT_LE(network_stats.preferred_buffer_size_ms, kFrameSizeMs * 2);
-        EXPECT_LE(network_stats.current_buffer_size_ms,
-                  kFrameSizeMs * 2 + algorithmic_delay_ms_);
-      }
+      EXPECT_LE(network_stats.preferred_buffer_size_ms, 80);
+      EXPECT_LE(network_stats.current_buffer_size_ms,
+                80 + algorithmic_delay_ms_);
      last_seq_no = seq_no;
      last_timestamp = timestamp;
      ++seq_no;
      timestamp += kSamples;
-      receive_timestamp += kSamples;
      next_input_time_ms += static_cast<double>(kFrameSizeMs);
      seq_no_wrapped |= seq_no < last_seq_no;
@@ -348,8 +340,8 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
  if (network_freeze_ms > 0) {
-    // First keep pulling audio for |network_freeze_ms| without inserting
-    // any data, then insert CNG data corresponding to |network_freeze_ms|
+    // First keep pulling audio for `network_freeze_ms` without inserting
+    // any data, then insert CNG data corresponding to `network_freeze_ms`
    // without pulling any output audio.
    const double loop_end_time = t_ms + network_freeze_ms;
    for (; t_ms < loop_end_time; t_ms += 10) {
      EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
    }
    bool pull_once = pull_audio_during_freeze;
-    // If |pull_once| is true, GetAudio will be called once half-way through
+    // If `pull_once` is true, GetAudio will be called once half-way through
    // the network recovery period.
    double pull_time_ms = (t_ms + next_input_time_ms) / 2;
    while (next_input_time_ms <= t_ms) {
diff --git a/modules/audio_coding/neteq/time_stretch.cc b/modules/audio_coding/neteq/time_stretch.cc
index b7680292bd..b89be0608d 100644
--- a/modules/audio_coding/neteq/time_stretch.cc
+++ b/modules/audio_coding/neteq/time_stretch.cc
@@ -26,7 +26,7 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input,
                                              bool fast_mode,
                                              AudioMultiVector* output,
                                              size_t* length_change_samples) {
-  // Pre-calculate common multiplication with |fs_mult_|.
+  // Pre-calculate common multiplication with `fs_mult_`.
  size_t fs_mult_120 =
      static_cast<size_t>(fs_mult_ * 120);  // Corresponds to 15 ms.
@@ -37,8 +37,8 @@
    signal = input;
    signal_len = input_len;
  } else {
-    // We want |signal| to be only the first channel of |input|, which is
-    // interleaved. Thus, we take the first sample, skip forward |num_channels|
+    // We want `signal` to be only the first channel of `input`, which is
+    // interleaved. Thus, we take the first sample, skip forward `num_channels`
    // samples, and continue like that.
    signal_len = input_len / num_channels_;
    signal_array.reset(new int16_t[signal_len]);
@@ -65,37 +65,37 @@
  int16_t peak_value;
  DspHelper::PeakDetection(auto_correlation_, kCorrelationLen, kNumPeaks,
                           fs_mult_, &peak_index, &peak_value);
-  // Assert that |peak_index| stays within boundaries.
+  // Assert that `peak_index` stays within boundaries.
  RTC_DCHECK_LE(peak_index, (2 * kCorrelationLen - 1) * fs_mult_);
  // Compensate peak_index for displaced starting position. The displacement
-  // happens in AutoCorrelation(). Here, |kMinLag| is in the down-sampled 4 kHz
-  // domain, while the |peak_index| is in the original sample rate; hence, the
+  // happens in AutoCorrelation(). Here, `kMinLag` is in the down-sampled 4 kHz
+  // domain, while the `peak_index` is in the original sample rate; hence, the
  // multiplication by fs_mult_ * 2.
  peak_index += kMinLag * fs_mult_ * 2;
-  // Assert that |peak_index| stays within boundaries.
+  // Assert that `peak_index` stays within boundaries.
  RTC_DCHECK_GE(peak_index, static_cast<size_t>(20 * fs_mult_));
  RTC_DCHECK_LE(peak_index,
                20 * fs_mult_ + (2 * kCorrelationLen - 1) * fs_mult_);
-  // Calculate scaling to ensure that |peak_index| samples can be square-summed
+  // Calculate scaling to ensure that `peak_index` samples can be square-summed
  // without overflowing.
  int scaling = 31 - WebRtcSpl_NormW32(max_input_value_ * max_input_value_) -
                WebRtcSpl_NormW32(static_cast<int32_t>(peak_index));
  scaling = std::max(0, scaling);
-  // |vec1| starts at 15 ms minus one pitch period.
+  // `vec1` starts at 15 ms minus one pitch period.
  const int16_t* vec1 = &signal[fs_mult_120 - peak_index];
-  // |vec2| start at 15 ms.
+  // `vec2` starts at 15 ms.
  const int16_t* vec2 = &signal[fs_mult_120];
-  // Calculate energies for |vec1| and |vec2|, assuming they both contain
-  // |peak_index| samples.
+  // Calculate energies for `vec1` and `vec2`, assuming they both contain
+  // `peak_index` samples.
  int32_t vec1_energy =
      WebRtcSpl_DotProductWithScale(vec1, vec1, peak_index, scaling);
  int32_t vec2_energy =
      WebRtcSpl_DotProductWithScale(vec2, vec2, peak_index, scaling);
-  // Calculate cross-correlation between |vec1| and |vec2|.
+  // Calculate cross-correlation between `vec1` and `vec2`.
  int32_t cross_corr =
      WebRtcSpl_DotProductWithScale(vec1, vec2, peak_index, scaling);
@@ -135,7 +135,7 @@
    cross_corr = WEBRTC_SPL_SHIFT_W32(cross_corr, temp_scale);
    cross_corr = std::max(0, cross_corr);  // Don't use if negative.
    best_correlation = WebRtcSpl_DivW32W16(cross_corr, sqrt_energy_prod);
-    // Make sure |best_correlation| is no larger than 1 in Q14.
+    // Make sure `best_correlation` is no larger than 1 in Q14.
    best_correlation =
        std::min(static_cast<int16_t>(16384), best_correlation);
  }
@@ -165,7 +165,7 @@ void TimeStretch::AutoCorrelation() {
      &downsampled_input_[kMaxLag], &downsampled_input_[kMaxLag - kMinLag],
      kCorrelationLen, kMaxLag - kMinLag, -1, auto_corr);
-  // Normalize correlation to 14 bits and write to |auto_correlation_|.
+  // Normalize correlation to 14 bits and write to `auto_correlation_`.
  int32_t max_corr = WebRtcSpl_MaxAbsValueW32(auto_corr, kCorrelationLen);
  int scaling = std::max(0, 17 - WebRtcSpl_NormW32(max_corr));
  WebRtcSpl_VectorBitShiftW32ToW16(auto_correlation_, kCorrelationLen,
@@ -182,8 +182,8 @@ bool TimeStretch::SpeechDetection(int32_t vec1_energy,
  // active speech.
  // Rewrite the inequality as:
  // (vec1_energy + vec2_energy) / 16 <= peak_index * background_noise_energy.
-  // The two sides of the inequality will be denoted |left_side| and
-  // |right_side|.
+  // The two sides of the inequality will be denoted `left_side` and
+  // `right_side`.
int32_t left_side = rtc::saturated_cast<int32_t>( (static_cast<int64_t>(vec1_energy) + vec2_energy) / 16); int32_t right_side; @@ -199,11 +199,11 @@ bool TimeStretch::SpeechDetection(int32_t vec1_energy, right_side = rtc::dchecked_cast<int32_t>(peak_index) * (right_side >> right_scale); - // Scale |left_side| properly before comparing with |right_side|. - // (|scaling| is the scale factor before energy calculation, thus the scale + // Scale `left_side` properly before comparing with `right_side`. + // (`scaling` is the scale factor before energy calculation, thus the scale // factor for the energy is 2 * scaling.) if (WebRtcSpl_NormW32(left_side) < 2 * scaling) { - // Cannot scale only |left_side|, must scale |right_side| too. + // Cannot scale only `left_side`, must scale `right_side` too. int temp_scale = WebRtcSpl_NormW32(left_side); left_side = left_side << temp_scale; right_side = right_side >> (2 * scaling - temp_scale); diff --git a/modules/audio_coding/neteq/time_stretch.h b/modules/audio_coding/neteq/time_stretch.h index 26d295f669..f0ddaebeca 100644 --- a/modules/audio_coding/neteq/time_stretch.h +++ b/modules/audio_coding/neteq/time_stretch.h @@ -11,11 +11,9 @@ #ifndef MODULES_AUDIO_CODING_NETEQ_TIME_STRETCH_H_ #define MODULES_AUDIO_CODING_NETEQ_TIME_STRETCH_H_ -#include <assert.h> #include <string.h> // memset, size_t #include "modules/audio_coding/neteq/audio_multi_vector.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -50,6 +48,9 @@ class TimeStretch { virtual ~TimeStretch() {} + TimeStretch(const TimeStretch&) = delete; + TimeStretch& operator=(const TimeStretch&) = delete; + // This method performs the processing common to both Accelerate and // PreemptiveExpand. ReturnCodes Process(const int16_t* input, @@ -59,7 +60,7 @@ class TimeStretch { size_t* length_change_samples); protected: - // Sets the parameters |best_correlation| and |peak_index| to suitable + // Sets the parameters `best_correlation` and `peak_index` to suitable // values when the signal contains no active speech. This method must be // implemented by the sub-classes. virtual void SetParametersForPassiveSpeech(size_t input_length, @@ -92,13 +93,13 @@ class TimeStretch { const BackgroundNoise& background_noise_; int16_t max_input_value_; int16_t downsampled_input_[kDownsampledLen]; - // Adding 1 to the size of |auto_correlation_| because of how it is used + // Adding 1 to the size of `auto_correlation_` because of how it is used // by the peak-detection algorithm. int16_t auto_correlation_[kCorrelationLen + 1]; private: - // Calculates the auto-correlation of |downsampled_input_| and writes the - // result to |auto_correlation_|. + // Calculates the auto-correlation of `downsampled_input_` and writes the - // result to `auto_correlation_`. + // result to `auto_correlation_`. void AutoCorrelation(); // Performs a simple voice-activity detection based on the input parameters. @@ -106,8 +107,6 @@ class TimeStretch { int32_t vec2_energy, size_t peak_index, int scaling) const; - - RTC_DISALLOW_COPY_AND_ASSIGN(TimeStretch); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/timestamp_scaler.cc b/modules/audio_coding/neteq/timestamp_scaler.cc index b0461bb92d..59177d027f 100644 --- a/modules/audio_coding/neteq/timestamp_scaler.cc +++ b/modules/audio_coding/neteq/timestamp_scaler.cc @@ -79,7 +79,7 @@ uint32_t TimestampScaler::ToExternal(uint32_t internal_timestamp) const { const int64_t internal_diff = int64_t{internal_timestamp} - internal_ref_; RTC_DCHECK_GT(numerator_, 0); // Do not update references in this method.
- // Switch |denominator_| and |numerator_| to convert the other way. + // Switch `denominator_` and `numerator_` to convert the other way. return external_ref_ + (internal_diff * denominator_) / numerator_; } } diff --git a/modules/audio_coding/neteq/timestamp_scaler.h b/modules/audio_coding/neteq/timestamp_scaler.h index 93cb953e90..f42ce7207a 100644 --- a/modules/audio_coding/neteq/timestamp_scaler.h +++ b/modules/audio_coding/neteq/timestamp_scaler.h @@ -12,7 +12,6 @@ #define MODULES_AUDIO_CODING_NETEQ_TIMESTAMP_SCALER_H_ #include "modules/audio_coding/neteq/packet.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -34,18 +33,21 @@ class TimestampScaler { virtual ~TimestampScaler() {} + TimestampScaler(const TimestampScaler&) = delete; + TimestampScaler& operator=(const TimestampScaler&) = delete; + // Start over. virtual void Reset(); - // Scale the timestamp in |packet| from external to internal. + // Scale the timestamp in `packet` from external to internal. virtual void ToInternal(Packet* packet); - // Scale the timestamp for all packets in |packet_list| from external to + // Scale the timestamp for all packets in `packet_list` from external to // internal. virtual void ToInternal(PacketList* packet_list); - // Returns the internal equivalent of |external_timestamp|, given the - // RTP payload type |rtp_payload_type|. + // Returns the internal equivalent of `external_timestamp`, given the + // RTP payload type `rtp_payload_type`. virtual uint32_t ToInternal(uint32_t external_timestamp, uint8_t rtp_payload_type); @@ -59,8 +61,6 @@ class TimestampScaler { uint32_t external_ref_; uint32_t internal_ref_; const DecoderDatabase& decoder_database_; - - RTC_DISALLOW_COPY_AND_ASSIGN(TimestampScaler); }; } // namespace webrtc diff --git a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc index 9ba63e3780..26dc06db5e 100644 --- a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc +++ b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc @@ -58,7 +58,7 @@ TEST(TimestampScaler, TestNoScalingLargeStep) { // Test both sides of the timestamp wrap-around. static const uint32_t kStep = 160; uint32_t start_timestamp = 0; - // |external_timestamp| will be a large positive value. + // `external_timestamp` will be a large positive value. start_timestamp = start_timestamp - 5 * kStep; for (uint32_t timestamp = start_timestamp; timestamp != 5 * kStep; timestamp += kStep) { @@ -111,7 +111,7 @@ TEST(TimestampScaler, TestG722LargeStep) { // Test both sides of the timestamp wrap-around. static const uint32_t kStep = 320; uint32_t external_timestamp = 0; - // |external_timestamp| will be a large positive value. + // `external_timestamp` will be a large positive value. external_timestamp = external_timestamp - 5 * kStep; uint32_t internal_timestamp = external_timestamp; for (; external_timestamp != 5 * kStep; external_timestamp += kStep) { @@ -290,7 +290,7 @@ TEST(TimestampScaler, TestOpusLargeStep) { // Test both sides of the timestamp wrap-around. static const uint32_t kStep = 960; uint32_t external_timestamp = 0; - // |external_timestamp| will be a large positive value. + // `external_timestamp` will be a large positive value. 
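For context on these scaler tests: ToExternal() above computes external_ref_ + (internal_diff * denominator_) / numerator_, and the internal direction simply swaps the two factors. A self-contained sketch of the internal conversion (an illustrative free function; G.722 is the classic motivation, since its RTP clock is assumed to run at 8 kHz while NetEq operates on 16 kHz timestamps):

#include <cstdint>

uint32_t ToInternal(uint32_t external_timestamp,
                    uint32_t external_ref,
                    uint32_t internal_ref,
                    int numerator,
                    int denominator) {
  const int64_t external_diff = int64_t{external_timestamp} - external_ref;
  // Unsigned wrap-around on the final addition mirrors RTP timestamp math.
  return static_cast<uint32_t>(internal_ref +
                               (external_diff * numerator) / denominator);
}

// With numerator = 2 and denominator = 1 (the assumed G.722 setting), an
// external step of 320 samples becomes an internal step of 640.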
external_timestamp = external_timestamp - 5 * kStep; uint32_t internal_timestamp = external_timestamp; for (; external_timestamp != 5 * kStep; external_timestamp += kStep) { diff --git a/modules/audio_coding/neteq/tools/audio_checksum.h b/modules/audio_coding/neteq/tools/audio_checksum.h index e4306fa036..9d6f3432c0 100644 --- a/modules/audio_coding/neteq/tools/audio_checksum.h +++ b/modules/audio_coding/neteq/tools/audio_checksum.h @@ -16,7 +16,6 @@ #include "modules/audio_coding/neteq/tools/audio_sink.h" #include "rtc_base/buffer.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/message_digest.h" #include "rtc_base/string_encode.h" #include "rtc_base/system/arch.h" @@ -31,6 +30,9 @@ class AudioChecksum : public AudioSink { checksum_result_(checksum_->Size()), finished_(false) {} + AudioChecksum(const AudioChecksum&) = delete; + AudioChecksum& operator=(const AudioChecksum&) = delete; + bool WriteArray(const int16_t* audio, size_t num_samples) override { if (finished_) return false; @@ -56,8 +58,6 @@ class AudioChecksum : public AudioSink { std::unique_ptr<rtc::MessageDigest> checksum_; rtc::Buffer checksum_result_; bool finished_; - - RTC_DISALLOW_COPY_AND_ASSIGN(AudioChecksum); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/audio_loop.h b/modules/audio_coding/neteq/tools/audio_loop.h index cd764cc5d0..076960a21c 100644 --- a/modules/audio_coding/neteq/tools/audio_loop.h +++ b/modules/audio_coding/neteq/tools/audio_loop.h @@ -15,7 +15,6 @@ #include #include "api/array_view.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { @@ -29,17 +28,20 @@ class AudioLoop { virtual ~AudioLoop() {} - // Initializes the AudioLoop by reading from |file_name|. The loop will be no - // longer than |max_loop_length_samples|, if the length of the file is + AudioLoop(const AudioLoop&) = delete; + AudioLoop& operator=(const AudioLoop&) = delete; + + // Initializes the AudioLoop by reading from `file_name`. The loop will be no + // longer than `max_loop_length_samples`, if the length of the file is // greater. Otherwise, the loop length is the same as the file length. - // The audio will be delivered in blocks of |block_length_samples|. + // The audio will be delivered in blocks of `block_length_samples`. // Returns false if the initialization failed, otherwise true. - bool Init(const std::string file_name, + bool Init(std::string file_name, size_t max_loop_length_samples, size_t block_length_samples); // Returns a (pointer,size) pair for the next block of audio. The size is - // equal to the |block_length_samples| Init() argument. + // equal to the `block_length_samples` Init() argument. rtc::ArrayView<const int16_t> GetNextBlock(); private: @@ -47,8 +49,6 @@ class AudioLoop { size_t loop_length_samples_; size_t block_length_samples_; std::unique_ptr<int16_t[]> audio_array_; - - RTC_DISALLOW_COPY_AND_ASSIGN(AudioLoop); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/audio_sink.h b/modules/audio_coding/neteq/tools/audio_sink.h index 68825ebbbf..53729fa920 100644 --- a/modules/audio_coding/neteq/tools/audio_sink.h +++ b/modules/audio_coding/neteq/tools/audio_sink.h @@ -12,7 +12,6 @@ #define MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_ #include "api/audio/audio_frame.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { @@ -24,19 +23,19 @@ class AudioSink { AudioSink() {} virtual ~AudioSink() {} - // Writes |num_samples| from |audio| to the AudioSink.
Returns true if + AudioSink(const AudioSink&) = delete; + AudioSink& operator=(const AudioSink&) = delete; + + // Writes `num_samples` from `audio` to the AudioSink. Returns true if // successful, otherwise false. virtual bool WriteArray(const int16_t* audio, size_t num_samples) = 0; - // Writes |audio_frame| to the AudioSink. Returns true if successful, + // Writes `audio_frame` to the AudioSink. Returns true if successful, // otherwise false. bool WriteAudioFrame(const AudioFrame& audio_frame) { return WriteArray(audio_frame.data(), audio_frame.samples_per_channel_ * audio_frame.num_channels_); } - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(AudioSink); }; // Forks the output audio to two AudioSink objects. @@ -45,23 +44,25 @@ class AudioSinkFork : public AudioSink { AudioSinkFork(AudioSink* left, AudioSink* right) : left_sink_(left), right_sink_(right) {} + AudioSinkFork(const AudioSinkFork&) = delete; + AudioSinkFork& operator=(const AudioSinkFork&) = delete; + bool WriteArray(const int16_t* audio, size_t num_samples) override; private: AudioSink* left_sink_; AudioSink* right_sink_; - - RTC_DISALLOW_COPY_AND_ASSIGN(AudioSinkFork); }; // An AudioSink implementation that does nothing. class VoidAudioSink : public AudioSink { public: VoidAudioSink() = default; - bool WriteArray(const int16_t* audio, size_t num_samples) override; - private: - RTC_DISALLOW_COPY_AND_ASSIGN(VoidAudioSink); + VoidAudioSink(const VoidAudioSink&) = delete; + VoidAudioSink& operator=(const VoidAudioSink&) = delete; + + bool WriteArray(const int16_t* audio, size_t num_samples) override; }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc index 6cbba20e5f..18a910365f 100644 --- a/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc +++ b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc @@ -43,7 +43,7 @@ std::unique_ptr<Packet> ConstantPcmPacketSource::NextPacket() { for (unsigned i = 0; i < 2 * payload_len_samples_; ++i) packet_memory[kHeaderLenBytes + i] = encoded_sample_[i % 2]; WriteHeader(packet_memory); - // |packet| assumes ownership of |packet_memory|. + // `packet` assumes ownership of `packet_memory`.
auto packet = std::make_unique<Packet>(std::move(packet_buffer), next_arrival_time_ms_); next_arrival_time_ms_ += payload_len_samples_ / samples_per_ms_; diff --git a/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h index 6a79ce4d1f..ab4f5c2281 100644 --- a/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h +++ b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h @@ -16,7 +16,6 @@ #include #include "modules/audio_coding/neteq/tools/packet_source.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { @@ -31,6 +30,9 @@ class ConstantPcmPacketSource : public PacketSource { int sample_rate_hz, int payload_type); + ConstantPcmPacketSource(const ConstantPcmPacketSource&) = delete; + ConstantPcmPacketSource& operator=(const ConstantPcmPacketSource&) = delete; + std::unique_ptr<Packet> NextPacket() override; private: @@ -46,8 +48,6 @@ class ConstantPcmPacketSource : public PacketSource { uint16_t seq_number_; uint32_t timestamp_; const uint32_t payload_ssrc_; - - RTC_DISALLOW_COPY_AND_ASSIGN(ConstantPcmPacketSource); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/fake_decode_from_file.h b/modules/audio_coding/neteq/tools/fake_decode_from_file.h index 0260981565..7b53653998 100644 --- a/modules/audio_coding/neteq/tools/fake_decode_from_file.h +++ b/modules/audio_coding/neteq/tools/fake_decode_from_file.h @@ -54,9 +54,9 @@ class FakeDecodeFromFile : public AudioDecoder { int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override; - // Helper method. Writes |timestamp|, |samples| and - // |original_payload_size_bytes| to |encoded| in a format that the - // FakeDecodeFromFile decoder will understand. |encoded| must be at least 12 + // Helper method. Writes `timestamp`, `samples` and + // `original_payload_size_bytes` to `encoded` in a format that the + // FakeDecodeFromFile decoder will understand. `encoded` must be at least 12 // bytes long. static void PrepareEncoded(uint32_t timestamp, size_t samples, diff --git a/modules/audio_coding/neteq/tools/input_audio_file.cc b/modules/audio_coding/neteq/tools/input_audio_file.cc index d5e28629b4..0d9f0ed3f1 100644 --- a/modules/audio_coding/neteq/tools/input_audio_file.cc +++ b/modules/audio_coding/neteq/tools/input_audio_file.cc @@ -81,9 +81,9 @@ void InputAudioFile::DuplicateInterleaved(const int16_t* source, size_t samples, size_t channels, int16_t* destination) { - // Start from the end of |source| and |destination|, and work towards the + // Start from the end of `source` and `destination`, and work towards the // beginning. This is to allow in-place interleaving of the same array (i.e., - // |source| and |destination| are the same array). + // `source` and `destination` are the same array). for (int i = static_cast<int>(samples - 1); i >= 0; --i) { for (int j = static_cast<int>(channels - 1); j >= 0; --j) { destination[i * channels + j] = source[i]; } } diff --git a/modules/audio_coding/neteq/tools/input_audio_file.h b/modules/audio_coding/neteq/tools/input_audio_file.h index 4335a9913e..056dbf5c20 100644 --- a/modules/audio_coding/neteq/tools/input_audio_file.h +++ b/modules/audio_coding/neteq/tools/input_audio_file.h @@ -15,34 +15,35 @@ #include -#include "rtc_base/constructor_magic.h" - namespace webrtc { namespace test { // Class for handling a looping input audio file.
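A self-contained demonstration of the backward, in-place interleaving that DuplicateInterleaved() uses above (re-implemented locally so the example compiles on its own):

#include <cstdint>
#include <cstdio>

void Duplicate(const int16_t* source, int samples, int channels,
               int16_t* destination) {
  // Walk from the end so source and destination may alias the same array:
  // each source sample is read before its destination slots are overwritten.
  for (int i = samples - 1; i >= 0; --i) {
    for (int j = channels - 1; j >= 0; --j) {
      destination[i * channels + j] = source[i];
    }
  }
}

int main() {
  int16_t buf[8] = {1, 2, 3, 4, 0, 0, 0, 0};
  Duplicate(buf, 4, 2, buf);  // In place: buf becomes {1, 1, 2, 2, 3, 3, 4, 4}.
  for (int16_t s : buf) {
    std::printf("%d ", s);
  }
  std::printf("\n");
  return 0;
}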
class InputAudioFile { public: - explicit InputAudioFile(const std::string file_name, bool loop_at_end = true); + explicit InputAudioFile(std::string file_name, bool loop_at_end = true); virtual ~InputAudioFile(); - // Reads |samples| elements from source file to |destination|. Returns true + InputAudioFile(const InputAudioFile&) = delete; + InputAudioFile& operator=(const InputAudioFile&) = delete; + + // Reads `samples` elements from source file to `destination`. Returns true // if the read was successful, otherwise false. If the file end is reached, // the file is rewound and reading continues from the beginning. - // The output |destination| must have the capacity to hold |samples| elements. + // The output `destination` must have the capacity to hold `samples` elements. virtual bool Read(size_t samples, int16_t* destination); - // Fast-forwards (|samples| > 0) or -backwards (|samples| < 0) the file by the + // Fast-forwards (`samples` > 0) or -backwards (`samples` < 0) the file by the // indicated number of samples. Just like Read(), Seek() starts over at the // beginning of the file if the end is reached. However, seeking backwards // past the beginning of the file is not possible. virtual bool Seek(int samples); // Creates a multi-channel signal from a mono signal. Each sample is repeated - // |channels| times to create an interleaved multi-channel signal where all - // channels are identical. The output |destination| must have the capacity to - // hold samples * channels elements. Note that |source| and |destination| can + // `channels` times to create an interleaved multi-channel signal where all + // channels are identical. The output `destination` must have the capacity to + // hold samples * channels elements. Note that `source` and `destination` can // be the same array (i.e., point to the same address). static void DuplicateInterleaved(const int16_t* source, size_t samples, @@ -52,7 +53,6 @@ class InputAudioFile { private: FILE* fp_; const bool loop_at_end_; - RTC_DISALLOW_COPY_AND_ASSIGN(InputAudioFile); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h b/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h index f6b895aae0..f56ddb7cf7 100644 --- a/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h +++ b/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h @@ -47,7 +47,7 @@ class NetEqDelayAnalyzer : public test::NetEqPostInsertPacket, // as provided by CreateGraphs. void CreateMatlabScript(const std::string& script_name) const; - // Creates a python script with file name |script_name|. When executed in + // Creates a python script with file name `script_name`. When executed in // Python, the script will generate graphs with the same timing information // as provided by CreateGraphs. void CreatePythonScript(const std::string& script_name) const; diff --git a/modules/audio_coding/neteq/tools/neteq_input.h b/modules/audio_coding/neteq/tools/neteq_input.h index 732b8070f6..3a66264043 100644 --- a/modules/audio_coding/neteq/tools/neteq_input.h +++ b/modules/audio_coding/neteq/tools/neteq_input.h @@ -51,7 +51,7 @@ class NetEqInput { absl::optional<int64_t> NextEventTime() const { const auto a = NextPacketTime(); const auto b = NextOutputEventTime(); - // Return the minimum of non-empty |a| and |b|, or empty if both are empty. + // Return the minimum of non-empty `a` and `b`, or empty if both are empty. if (a) { return b ?
std::min(*a, *b) : a; } diff --git a/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/modules/audio_coding/neteq/tools/neteq_performance_test.cc index 1fb853ccb7..ccaa87b5e8 100644 --- a/modules/audio_coding/neteq/tools/neteq_performance_test.cc +++ b/modules/audio_coding/neteq/tools/neteq_performance_test.cc @@ -44,7 +44,7 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms, auto audio_decoder_factory = CreateBuiltinAudioDecoderFactory(); auto neteq = DefaultNetEqFactory().CreateNetEq(config, audio_decoder_factory, clock); - // Register decoder in |neteq|. + // Register decoder in `neteq`. if (!neteq->RegisterPayloadType(kPayloadType, SdpAudioFormat("l16", kSampRateHz, 1))) return -1; diff --git a/modules/audio_coding/neteq/tools/neteq_performance_test.h b/modules/audio_coding/neteq/tools/neteq_performance_test.h index d2212f03a3..b5b4d91577 100644 --- a/modules/audio_coding/neteq/tools/neteq_performance_test.h +++ b/modules/audio_coding/neteq/tools/neteq_performance_test.h @@ -19,9 +19,9 @@ namespace test { class NetEqPerformanceTest { public: // Runs a performance test with parameters as follows: - // |runtime_ms|: the simulation time, i.e., the duration of the audio data. - // |lossrate|: drop one out of |lossrate| packets, e.g., one out of 10. - // |drift_factor|: clock drift in [0, 1]. + // `runtime_ms`: the simulation time, i.e., the duration of the audio data. + // `lossrate`: drop one out of `lossrate` packets, e.g., one out of 10. + // `drift_factor`: clock drift in [0, 1]. // Returns the runtime in ms. static int64_t Run(int runtime_ms, int lossrate, double drift_factor); }; diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/modules/audio_coding/neteq/tools/neteq_quality_test.cc index 3f3077f531..8322ac2287 100644 --- a/modules/audio_coding/neteq/tools/neteq_quality_test.cc +++ b/modules/audio_coding/neteq/tools/neteq_quality_test.cc @@ -120,8 +120,8 @@ static bool ValidateFilename(const std::string& value, bool is_output) { // ProbTrans00Solver() is to calculate the transition probability from no-loss // state to itself in a modified Gilbert Elliot packet loss model. The result is -// to achieve the target packet loss rate |loss_rate|, when a packet is not -// lost only if all |units| drawings within the duration of the packet result in +// to achieve the target packet loss rate `loss_rate`, when a packet is not +// lost only if all `units` drawings within the duration of the packet result in // no-loss. static double ProbTrans00Solver(int units, double loss_rate, @@ -310,10 +310,10 @@ void NetEqQualityTest::SetUp() { int units = block_duration_ms_ / kPacketLossTimeUnitMs; switch (absl::GetFlag(FLAGS_random_loss_mode)) { case kUniformLoss: { - // |unit_loss_rate| is the packet loss rate for each unit time interval + // `unit_loss_rate` is the packet loss rate for each unit time interval // (kPacketLossTimeUnitMs). Since a packet loss event is generated if any // of |block_duration_ms_ / kPacketLossTimeUnitMs| unit time intervals of - // a full packet duration is drawn with a loss, |unit_loss_rate| fulfills + // a full packet duration is drawn with a loss, `unit_loss_rate` fulfills // (1 - unit_loss_rate) ^ (block_duration_ms_ / kPacketLossTimeUnitMs) == // 1 - packet_loss_rate. double unit_loss_rate = @@ -322,7 +322,7 @@ void NetEqQualityTest::SetUp() { break; } case kGilbertElliotLoss: { - // |FLAGS_burst_length| should be integer times of kPacketLossTimeUnitMs. + // `FLAGS_burst_length` should be integer times of kPacketLossTimeUnitMs. 
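Aside: the uniform-loss relation quoted above, (1 - unit_loss_rate)^n == 1 - packet_loss_rate with n = block_duration_ms_ / kPacketLossTimeUnitMs, solves directly for the per-unit rate. A numeric sketch (an illustrative helper, not part of the test code):

#include <cmath>

double UnitLossRate(int units_per_packet, double packet_loss_rate) {
  return 1.0 - std::pow(1.0 - packet_loss_rate, 1.0 / units_per_packet);
}

// Example: a 20 ms packet, 10 ms loss units (n = 2) and a 10% target rate
// give 1 - 0.9^(1/2), roughly 0.051 per unit interval.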
ASSERT_EQ(0, absl::GetFlag(FLAGS_burst_length) % kPacketLossTimeUnitMs); // We do not allow 100 percent packet loss in Gilbert Elliot model, which diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.h b/modules/audio_coding/neteq/tools/neteq_quality_test.h index 11d347ac7e..edcb117748 100644 --- a/modules/audio_coding/neteq/tools/neteq_quality_test.h +++ b/modules/audio_coding/neteq/tools/neteq_quality_test.h @@ -108,9 +108,9 @@ class NetEqQualityTest : public ::testing::Test { void SetUp() override; // EncodeBlock(...) does the following: - // 1. encodes a block of audio, saved in |in_data| and has a length of - // |block_size_samples| (samples per channel), - // 2. save the bit stream to |payload| of |max_bytes| bytes in size, + // 1. encodes a block of audio, saved in `in_data` and has a length of + // `block_size_samples` (samples per channel), + // 2. save the bit stream to `payload` of `max_bytes` bytes in size, // 3. returns the length of the payload (in bytes), virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples, @@ -122,12 +122,12 @@ class NetEqQualityTest : public ::testing::Test { bool PacketLost(); // DecodeBlock() decodes a block of audio using the payload stored in - // |payload_| with the length of |payload_size_bytes_| (bytes). The decoded - // audio is to be stored in |out_data_|. + // `payload_` with the length of `payload_size_bytes_` (bytes). The decoded + // audio is to be stored in `out_data_`. int DecodeBlock(); - // Transmit() uses |rtp_generator_| to generate a packet and passes it to - // |neteq_|. + // Transmit() uses `rtp_generator_` to generate a packet and passes it to + // `neteq_`. int Transmit(); // Runs encoding / transmitting / decoding. diff --git a/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/modules/audio_coding/neteq/tools/neteq_rtpplay.cc index 16a789fde5..39f05e54f3 100644 --- a/modules/audio_coding/neteq/tools/neteq_rtpplay.cc +++ b/modules/audio_coding/neteq/tools/neteq_rtpplay.cc @@ -157,7 +157,7 @@ ABSL_FLAG(bool, namespace { // Parses the input string for a valid SSRC (at the start of the string). If a -// valid SSRC is found, it is written to the output variable |ssrc|, and true is +// valid SSRC is found, it is written to the output variable `ssrc`, and true is // returned. Otherwise, false is returned. bool ParseSsrc(const std::string& str, uint32_t* ssrc) { if (str.empty()) @@ -247,7 +247,7 @@ bool ValidateOutputFilesOptions(bool textlog, << std::endl; return false; } - // Without |output_audio_filename|, |output_files_base_name| is required when + // Without `output_audio_filename`, `output_files_base_name` is required when // plotting output files must be generated (in order to form a valid output // file name). 
if (output_audio_filename.empty() && plotting && diff --git a/modules/audio_coding/neteq/tools/neteq_stats_plotter.cc b/modules/audio_coding/neteq/tools/neteq_stats_plotter.cc index 337f54ed6e..3f06b1cfc4 100644 --- a/modules/audio_coding/neteq/tools/neteq_stats_plotter.cc +++ b/modules/audio_coding/neteq/tools/neteq_stats_plotter.cc @@ -33,8 +33,7 @@ NetEqStatsPlotter::NetEqStatsPlotter(bool make_matlab_plot, stats_getter_.reset(new NetEqStatsGetter(std::move(delay_analyzer))); } -void NetEqStatsPlotter::SimulationEnded(int64_t simulation_time_ms, - NetEq* /*neteq*/) { +void NetEqStatsPlotter::SimulationEnded(int64_t simulation_time_ms) { if (make_matlab_plot_) { auto matlab_script_name = base_file_name_; std::replace(matlab_script_name.begin(), matlab_script_name.end(), '.', diff --git a/modules/audio_coding/neteq/tools/neteq_stats_plotter.h b/modules/audio_coding/neteq/tools/neteq_stats_plotter.h index d6918670fd..c4df24e073 100644 --- a/modules/audio_coding/neteq/tools/neteq_stats_plotter.h +++ b/modules/audio_coding/neteq/tools/neteq_stats_plotter.h @@ -28,7 +28,7 @@ class NetEqStatsPlotter : public NetEqSimulationEndedCallback { bool show_concealment_events, std::string base_file_name); - void SimulationEnded(int64_t simulation_time_ms, NetEq* neteq) override; + void SimulationEnded(int64_t simulation_time_ms) override; NetEqStatsGetter* stats_getter() { return stats_getter_.get(); } diff --git a/modules/audio_coding/neteq/tools/neteq_test.cc b/modules/audio_coding/neteq/tools/neteq_test.cc index 22f5ad6931..7902438304 100644 --- a/modules/audio_coding/neteq/tools/neteq_test.cc +++ b/modules/audio_coding/neteq/tools/neteq_test.cc @@ -91,8 +91,7 @@ int64_t NetEqTest::Run() { simulation_time += step_result.simulation_step_ms; } while (!step_result.is_simulation_finished); if (callbacks_.simulation_ended_callback) { - callbacks_.simulation_ended_callback->SimulationEnded(simulation_time, - neteq_.get()); + callbacks_.simulation_ended_callback->SimulationEnded(simulation_time); } return simulation_time; } diff --git a/modules/audio_coding/neteq/tools/neteq_test.h b/modules/audio_coding/neteq/tools/neteq_test.h index 3b787a6cfb..0a6c24f3d6 100644 --- a/modules/audio_coding/neteq/tools/neteq_test.h +++ b/modules/audio_coding/neteq/tools/neteq_test.h @@ -61,7 +61,7 @@ class NetEqGetAudioCallback { class NetEqSimulationEndedCallback { public: virtual ~NetEqSimulationEndedCallback() = default; - virtual void SimulationEnded(int64_t simulation_time_ms, NetEq* neteq) = 0; + virtual void SimulationEnded(int64_t simulation_time_ms) = 0; }; // Class that provides an input--output test for NetEq. The input (both packets diff --git a/modules/audio_coding/neteq/tools/neteq_test_factory.h b/modules/audio_coding/neteq/tools/neteq_test_factory.h index fdfe650b94..cb9bb1cc51 100644 --- a/modules/audio_coding/neteq/tools/neteq_test_factory.h +++ b/modules/audio_coding/neteq/tools/neteq_test_factory.h @@ -134,7 +134,7 @@ class NetEqTestFactory { bool enable_fast_accelerate = false; // Dumps events that describes the simulation on a step-by-step basis. bool textlog = false; - // If specified and |textlog| is true, the output of |textlog| is written to + // If specified and `textlog` is true, the output of `textlog` is written to // the specified file name. absl::optional<std::string> textlog_filename; // Base name for the output script files for plotting the delay profile.
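For illustration, the slimmed-down callback interface above can now be implemented without touching NetEq at all. A minimal sketch (the interface is restated locally so the example compiles on its own; the printing subclass is invented):

#include <cstdint>
#include <cstdio>

// Mirrors the declaration in neteq_test.h after this change.
class NetEqSimulationEndedCallback {
 public:
  virtual ~NetEqSimulationEndedCallback() = default;
  virtual void SimulationEnded(int64_t simulation_time_ms) = 0;
};

class PrintingSimulationEndedCallback : public NetEqSimulationEndedCallback {
 public:
  void SimulationEnded(int64_t simulation_time_ms) override {
    std::printf("simulation ended after %lld ms\n",
                static_cast<long long>(simulation_time_ms));
  }
};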
diff --git a/modules/audio_coding/neteq/tools/output_audio_file.h b/modules/audio_coding/neteq/tools/output_audio_file.h index 7220a36d69..491cbd0420 100644 --- a/modules/audio_coding/neteq/tools/output_audio_file.h +++ b/modules/audio_coding/neteq/tools/output_audio_file.h @@ -11,20 +11,18 @@ #ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_ #define MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_ -#include <assert.h> #include <stdio.h> #include <string> #include "modules/audio_coding/neteq/tools/audio_sink.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { class OutputAudioFile : public AudioSink { public: - // Creates an OutputAudioFile, opening a file named |file_name| for writing. + // Creates an OutputAudioFile, opening a file named `file_name` for writing. // The file format is 16-bit signed host-endian PCM. explicit OutputAudioFile(const std::string& file_name) { out_file_ = fopen(file_name.c_str(), "wb"); @@ -35,6 +33,9 @@ fclose(out_file_); } + OutputAudioFile(const OutputAudioFile&) = delete; + OutputAudioFile& operator=(const OutputAudioFile&) = delete; + bool WriteArray(const int16_t* audio, size_t num_samples) override { RTC_DCHECK(out_file_); return fwrite(audio, sizeof(*audio), num_samples, out_file_) == num_samples; @@ -42,8 +43,6 @@ private: FILE* out_file_; - - RTC_DISALLOW_COPY_AND_ASSIGN(OutputAudioFile); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/output_wav_file.h b/modules/audio_coding/neteq/tools/output_wav_file.h index 6982a76a39..1485f4e911 100644 --- a/modules/audio_coding/neteq/tools/output_wav_file.h +++ b/modules/audio_coding/neteq/tools/output_wav_file.h @@ -15,20 +15,22 @@ #include "common_audio/wav_file.h" #include "modules/audio_coding/neteq/tools/audio_sink.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { class OutputWavFile : public AudioSink { public: - // Creates an OutputWavFile, opening a file named |file_name| for writing. + // Creates an OutputWavFile, opening a file named `file_name` for writing. // The output file is a PCM encoded wav file. OutputWavFile(const std::string& file_name, int sample_rate_hz, int num_channels = 1) : wav_writer_(file_name, sample_rate_hz, num_channels) {} + OutputWavFile(const OutputWavFile&) = delete; + OutputWavFile& operator=(const OutputWavFile&) = delete; + bool WriteArray(const int16_t* audio, size_t num_samples) override { wav_writer_.WriteSamples(audio, num_samples); return true; @@ -36,8 +38,6 @@ private: WavWriter wav_writer_; - - RTC_DISALLOW_COPY_AND_ASSIGN(OutputWavFile); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/packet.h b/modules/audio_coding/neteq/tools/packet.h index ef118d9f0b..96710907df 100644 --- a/modules/audio_coding/neteq/tools/packet.h +++ b/modules/audio_coding/neteq/tools/packet.h @@ -16,7 +16,6 @@ #include "api/array_view.h" #include "api/rtp_headers.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/copy_on_write_buffer.h" namespace webrtc { @@ -54,13 +53,16 @@ class Packet { virtual ~Packet(); + Packet(const Packet&) = delete; + Packet& operator=(const Packet&) = delete; + // Parses the first bytes of the RTP payload, interpreting them as RED headers - according to RFC 2198.
The headers will be inserted into `headers`. The // caller of the method assumes ownership of the objects in the list, and // must delete them properly. bool ExtractRedHeaders(std::list<RTPHeader*>* headers) const; - // Deletes all RTPHeader objects in |headers|, but does not delete |headers| + // Deletes all RTPHeader objects in `headers`, but does not delete `headers` // itself. static void DeleteRedHeaders(std::list<RTPHeader*>* headers); @@ -95,8 +97,6 @@ class Packet { size_t virtual_payload_length_bytes_ = 0; const double time_ms_; // Used to denote a packet's arrival time. const bool valid_header_; - - RTC_DISALLOW_COPY_AND_ASSIGN(Packet); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/packet_source.h b/modules/audio_coding/neteq/tools/packet_source.h index 975680f5a9..be1705cae1 100644 --- a/modules/audio_coding/neteq/tools/packet_source.h +++ b/modules/audio_coding/neteq/tools/packet_source.h @@ -15,7 +15,6 @@ #include #include "modules/audio_coding/neteq/tools/packet.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { @@ -26,6 +25,9 @@ class PacketSource { PacketSource(); virtual ~PacketSource(); + PacketSource(const PacketSource&) = delete; + PacketSource& operator=(const PacketSource&) = delete; + // Returns next packet. Returns nullptr if the source is depleted, or if an // error occurred. virtual std::unique_ptr<Packet> NextPacket() = 0; @@ -34,9 +36,6 @@ class PacketSource { protected: std::bitset<128> filter_; // Payload type is 7 bits in the RFC. - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(PacketSource); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/packet_unittest.cc b/modules/audio_coding/neteq/tools/packet_unittest.cc index 7cc9a48ee6..69cf56b529 100644 --- a/modules/audio_coding/neteq/tools/packet_unittest.cc +++ b/modules/audio_coding/neteq/tools/packet_unittest.cc @@ -124,17 +124,17 @@ TEST(TestPacket, DummyPaddingPacket) { } namespace { -// Writes one RED block header starting at |rtp_data|, according to RFC 2198. +// Writes one RED block header starting at `rtp_data`, according to RFC 2198. // returns the number of bytes written (1 or 4). // -// Format if |last_payoad| is false: +// Format if `last_payload` is false: // 0 1 2 3 // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // |1| block PT | timestamp offset | block length | // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // -// Format if |last_payoad| is true: +// Format if `last_payload` is true: // 0 1 2 3 4 5 6 7 // +-+-+-+-+-+-+-+-+ // |0| Block PT | @@ -183,7 +183,7 @@ TEST(TestPacket, RED) { last_block, payload_ptr); } const double kPacketTime = 1.0; - // Hand over ownership of |packet_memory| to |packet|. + // Hand over ownership of `packet_memory` to `packet`.
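For reference, the RFC 2198 block-header layouts drawn above pack into bytes as follows (a sketch equivalent in spirit to this test's local writer; the function name is illustrative):

#include <cstdint>

// Returns the number of bytes written: 4 for a non-final block header,
// 1 for the final one.
int WriteRedBlockHeader(uint8_t* out, bool last_block, uint8_t block_pt,
                        uint16_t timestamp_offset, uint16_t block_length) {
  if (last_block) {
    out[0] = block_pt & 0x7F;  // F bit = 0.
    return 1;
  }
  out[0] = 0x80 | (block_pt & 0x7F);           // F bit = 1, 7-bit block PT.
  out[1] = (timestamp_offset >> 6) & 0xFF;     // High 8 of 14 offset bits.
  out[2] = ((timestamp_offset & 0x3F) << 2) |  // Low 6 offset bits...
           ((block_length >> 8) & 0x03);       // ...high 2 of 10 length bits.
  out[3] = block_length & 0xFF;                // Low 8 length bits.
  return 4;
}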
Packet packet(packet_memory, kPacketLengthBytes, kPacketTime); ASSERT_TRUE(packet.valid_header()); EXPECT_EQ(kRedPayloadType, packet.header().payloadType); diff --git a/modules/audio_coding/neteq/tools/resample_input_audio_file.h b/modules/audio_coding/neteq/tools/resample_input_audio_file.h index 9106d5b769..497a4109df 100644 --- a/modules/audio_coding/neteq/tools/resample_input_audio_file.h +++ b/modules/audio_coding/neteq/tools/resample_input_audio_file.h @@ -15,7 +15,6 @@ #include "common_audio/resampler/include/resampler.h" #include "modules/audio_coding/neteq/tools/input_audio_file.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { @@ -37,6 +36,9 @@ class ResampleInputAudioFile : public InputAudioFile { file_rate_hz_(file_rate_hz), output_rate_hz_(output_rate_hz) {} + ResampleInputAudioFile(const ResampleInputAudioFile&) = delete; + ResampleInputAudioFile& operator=(const ResampleInputAudioFile&) = delete; + bool Read(size_t samples, int output_rate_hz, int16_t* destination); bool Read(size_t samples, int16_t* destination) override; void set_output_rate_hz(int rate_hz); @@ -45,7 +47,6 @@ class ResampleInputAudioFile : public InputAudioFile { const int file_rate_hz_; int output_rate_hz_; Resampler resampler_; - RTC_DISALLOW_COPY_AND_ASSIGN(ResampleInputAudioFile); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/rtc_event_log_source.h b/modules/audio_coding/neteq/tools/rtc_event_log_source.h index 3c91f73eb8..e2d0f61666 100644 --- a/modules/audio_coding/neteq/tools/rtc_event_log_source.h +++ b/modules/audio_coding/neteq/tools/rtc_event_log_source.h @@ -19,7 +19,6 @@ #include "logging/rtc_event_log/rtc_event_log_parser.h" #include "modules/audio_coding/neteq/tools/packet_source.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -31,7 +30,7 @@ class Packet; class RtcEventLogSource : public PacketSource { public: - // Creates an RtcEventLogSource reading from |file_name|. If the file cannot + // Creates an RtcEventLogSource reading from `file_name`. If the file cannot // be opened, or has the wrong format, NULL will be returned. static std::unique_ptr<RtcEventLogSource> CreateFromFile( const std::string& file_name, @@ -43,6 +42,9 @@ virtual ~RtcEventLogSource(); + RtcEventLogSource(const RtcEventLogSource&) = delete; + RtcEventLogSource& operator=(const RtcEventLogSource&) = delete; + std::unique_ptr<Packet> NextPacket() override; // Returns the timestamp of the next audio output event, in milliseconds. The @@ -60,8 +62,6 @@ size_t rtp_packet_index_ = 0; std::vector<int64_t> audio_outputs_; size_t audio_output_index_ = 0; - - RTC_DISALLOW_COPY_AND_ASSIGN(RtcEventLogSource); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/rtp_analyze.cc b/modules/audio_coding/neteq/tools/rtp_analyze.cc index 46fc2d744e..7ecf925ebb 100644 --- a/modules/audio_coding/neteq/tools/rtp_analyze.cc +++ b/modules/audio_coding/neteq/tools/rtp_analyze.cc @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree.
*/ -#include #include #include diff --git a/modules/audio_coding/neteq/tools/rtp_encode.cc b/modules/audio_coding/neteq/tools/rtp_encode.cc index 204f169a6d..ee392f26a3 100644 --- a/modules/audio_coding/neteq/tools/rtp_encode.cc +++ b/modules/audio_coding/neteq/tools/rtp_encode.cc @@ -191,7 +191,7 @@ AudioEncoderL16::Config Pcm16bConfig(CodecType codec_type) { config.sample_rate_hz = 48000; return config; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return config; } } @@ -242,7 +242,7 @@ std::unique_ptr<AudioEncoder> CreateEncoder(CodecType codec_type, GetCodecConfig(), payload_type); } } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return nullptr; } @@ -259,7 +259,7 @@ AudioEncoderCngConfig GetCngConfig(int sample_rate_hz) { case 48000: return 100; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } return 0; }; diff --git a/modules/audio_coding/neteq/tools/rtp_file_source.cc b/modules/audio_coding/neteq/tools/rtp_file_source.cc index 16b225e5df..e9c6fed951 100644 --- a/modules/audio_coding/neteq/tools/rtp_file_source.cc +++ b/modules/audio_coding/neteq/tools/rtp_file_source.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/tools/rtp_file_source.h" -#include <assert.h> #include <string.h> #ifndef WIN32 #include <netinet/in.h> diff --git a/modules/audio_coding/neteq/tools/rtp_file_source.h b/modules/audio_coding/neteq/tools/rtp_file_source.h index 953e2fae5f..7e284aca45 100644 --- a/modules/audio_coding/neteq/tools/rtp_file_source.h +++ b/modules/audio_coding/neteq/tools/rtp_file_source.h @@ -19,8 +19,6 @@ #include "absl/types/optional.h" #include "modules/audio_coding/neteq/tools/packet_source.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "modules/rtp_rtcp/source/rtp_utility.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -30,7 +28,7 @@ class RtpFileReader; class RtpFileSource : public PacketSource { public: - // Creates an RtpFileSource reading from |file_name|. If the file cannot be + // Creates an RtpFileSource reading from `file_name`. If the file cannot be // opened, or has the wrong format, NULL will be returned. static RtpFileSource* Create( const std::string& file_name, @@ -42,7 +40,10 @@ ~RtpFileSource() override; - // Registers an RTP header extension and binds it to |id|. + RtpFileSource(const RtpFileSource&) = delete; + RtpFileSource& operator=(const RtpFileSource&) = delete; + + // Registers an RTP header extension and binds it to `id`. virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id); std::unique_ptr<Packet> NextPacket() override; @@ -59,8 +60,6 @@ std::unique_ptr<RtpFileReader> rtp_reader_; const absl::optional<uint32_t> ssrc_filter_; RtpHeaderExtensionMap rtp_header_extension_map_; - - RTC_DISALLOW_COPY_AND_ASSIGN(RtpFileSource); }; } // namespace test diff --git a/modules/audio_coding/neteq/tools/rtp_generator.cc b/modules/audio_coding/neteq/tools/rtp_generator.cc index a37edef20a..e883fc11d6 100644 --- a/modules/audio_coding/neteq/tools/rtp_generator.cc +++ b/modules/audio_coding/neteq/tools/rtp_generator.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/neteq/tools/rtp_generator.h" -#include <assert.h> namespace webrtc { namespace test { @@ -51,7 +50,7 @@ uint32_t TimestampJumpRtpGenerator::GetRtpHeader(uint8_t payload_type, if (timestamp_ - static_cast<uint32_t>(payload_length_samples) <= jump_from_timestamp_ && timestamp_ > jump_from_timestamp_) { - // We just moved across the |jump_from_timestamp_| timestamp. + // We just moved across the `jump_from_timestamp_` timestamp.
Do the jump. timestamp_ = jump_to_timestamp_; } return ret; diff --git a/modules/audio_coding/neteq/tools/rtp_generator.h b/modules/audio_coding/neteq/tools/rtp_generator.h index 1454c5796a..2e615adec5 100644 --- a/modules/audio_coding/neteq/tools/rtp_generator.h +++ b/modules/audio_coding/neteq/tools/rtp_generator.h @@ -12,7 +12,6 @@ #define MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_ #include "api/rtp_headers.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { @@ -34,9 +33,12 @@ class RtpGenerator { virtual ~RtpGenerator() {} - // Writes the next RTP header to |rtp_header|, which will be of type - // |payload_type|. Returns the send time for this packet (in ms). The value of - // |payload_length_samples| determines the send time for the next packet. + RtpGenerator(const RtpGenerator&) = delete; + RtpGenerator& operator=(const RtpGenerator&) = delete; + + // Writes the next RTP header to `rtp_header`, which will be of type + // `payload_type`. Returns the send time for this packet (in ms). The value of + // `payload_length_samples` determines the send time for the next packet. virtual uint32_t GetRtpHeader(uint8_t payload_type, size_t payload_length_samples, RTPHeader* rtp_header); @@ -50,9 +52,6 @@ class RtpGenerator { const uint32_t ssrc_; const int samples_per_ms_; double drift_factor_; - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(RtpGenerator); }; class TimestampJumpRtpGenerator : public RtpGenerator { @@ -66,6 +65,10 @@ class TimestampJumpRtpGenerator : public RtpGenerator { jump_from_timestamp_(jump_from_timestamp), jump_to_timestamp_(jump_to_timestamp) {} + TimestampJumpRtpGenerator(const TimestampJumpRtpGenerator&) = delete; + TimestampJumpRtpGenerator& operator=(const TimestampJumpRtpGenerator&) = + delete; + uint32_t GetRtpHeader(uint8_t payload_type, size_t payload_length_samples, RTPHeader* rtp_header) override; @@ -73,7 +76,6 @@ class TimestampJumpRtpGenerator : public RtpGenerator { private: uint32_t jump_from_timestamp_; uint32_t jump_to_timestamp_; - RTC_DISALLOW_COPY_AND_ASSIGN(TimestampJumpRtpGenerator); }; } // namespace test diff --git a/modules/audio_coding/neteq/underrun_optimizer.cc b/modules/audio_coding/neteq/underrun_optimizer.cc new file mode 100644 index 0000000000..baed812327 --- /dev/null +++ b/modules/audio_coding/neteq/underrun_optimizer.cc @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/audio_coding/neteq/underrun_optimizer.h" + +#include <algorithm> + +namespace webrtc { + +namespace { + +constexpr int kDelayBuckets = 100; +constexpr int kBucketSizeMs = 20; + +} // namespace + +UnderrunOptimizer::UnderrunOptimizer(const TickTimer* tick_timer, + int histogram_quantile, + int forget_factor, + absl::optional<double> start_forget_weight, + absl::optional<int> resample_interval_ms) + : tick_timer_(tick_timer), + histogram_(kDelayBuckets, forget_factor, start_forget_weight), + histogram_quantile_(histogram_quantile), + resample_interval_ms_(resample_interval_ms) {} + +void UnderrunOptimizer::Update(int relative_delay_ms) { + absl::optional<int> histogram_update; + if (resample_interval_ms_) { + if (!resample_stopwatch_) { + resample_stopwatch_ = tick_timer_->GetNewStopwatch(); + } + if (static_cast<int>(resample_stopwatch_->ElapsedMs()) > + *resample_interval_ms_) { + histogram_update = max_delay_in_interval_ms_; + resample_stopwatch_ = tick_timer_->GetNewStopwatch(); + max_delay_in_interval_ms_ = 0; + } + max_delay_in_interval_ms_ = + std::max(max_delay_in_interval_ms_, relative_delay_ms); + } else { + histogram_update = relative_delay_ms; + } + if (!histogram_update) { + return; + } + + const int index = *histogram_update / kBucketSizeMs; + if (index < histogram_.NumBuckets()) { + // Maximum delay to register is 2000 ms. + histogram_.Add(index); + } + int bucket_index = histogram_.Quantile(histogram_quantile_); + optimal_delay_ms_ = (1 + bucket_index) * kBucketSizeMs; +} + +void UnderrunOptimizer::Reset() { + histogram_.Reset(); + resample_stopwatch_.reset(); + max_delay_in_interval_ms_ = 0; + optimal_delay_ms_.reset(); +} + +} // namespace webrtc diff --git a/modules/audio_coding/neteq/underrun_optimizer.h b/modules/audio_coding/neteq/underrun_optimizer.h new file mode 100644 index 0000000000..b37ce18795 --- /dev/null +++ b/modules/audio_coding/neteq/underrun_optimizer.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_CODING_NETEQ_UNDERRUN_OPTIMIZER_H_ +#define MODULES_AUDIO_CODING_NETEQ_UNDERRUN_OPTIMIZER_H_ + +#include <memory> + +#include "absl/types/optional.h" +#include "api/neteq/tick_timer.h" +#include "modules/audio_coding/neteq/histogram.h" + +namespace webrtc { + +// Estimates probability of buffer underrun due to late packet arrival. +// The optimal delay is decided such that the probability of underrun is lower +// than 1 - `histogram_quantile`. +class UnderrunOptimizer { + public: + UnderrunOptimizer(const TickTimer* tick_timer, + int histogram_quantile, + int forget_factor, + absl::optional<double> start_forget_weight, + absl::optional<int> resample_interval_ms); + + void Update(int relative_delay_ms); + + absl::optional<int> GetOptimalDelayMs() const { return optimal_delay_ms_; } + + void Reset(); + + private: + const TickTimer* tick_timer_; + Histogram histogram_; + const int histogram_quantile_; // In Q30.
+ const absl::optional<int> resample_interval_ms_; + std::unique_ptr<TickTimer::Stopwatch> resample_stopwatch_; + int max_delay_in_interval_ms_ = 0; + absl::optional<int> optimal_delay_ms_; +}; + +} // namespace webrtc +#endif // MODULES_AUDIO_CODING_NETEQ_UNDERRUN_OPTIMIZER_H_ diff --git a/modules/audio_coding/neteq/underrun_optimizer_unittest.cc b/modules/audio_coding/neteq/underrun_optimizer_unittest.cc new file mode 100644 index 0000000000..a86e9cf107 --- /dev/null +++ b/modules/audio_coding/neteq/underrun_optimizer_unittest.cc @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_coding/neteq/underrun_optimizer.h" + +#include "test/gtest.h" + +namespace webrtc { + +namespace { + +constexpr int kDefaultHistogramQuantile = 1020054733; // 0.95 in Q30. +constexpr int kForgetFactor = 32745; // 0.9993 in Q15. + +} // namespace + +TEST(UnderrunOptimizerTest, ResamplePacketDelays) { + TickTimer tick_timer; + constexpr int kResampleIntervalMs = 500; + UnderrunOptimizer underrun_optimizer(&tick_timer, kDefaultHistogramQuantile, + kForgetFactor, absl::nullopt, + kResampleIntervalMs); + + // The histogram should be updated once with the maximum delay observed for + // the following sequence of updates. + for (int i = 0; i < 500; i += 20) { + underrun_optimizer.Update(i); + EXPECT_FALSE(underrun_optimizer.GetOptimalDelayMs()); + } + tick_timer.Increment(kResampleIntervalMs / tick_timer.ms_per_tick() + 1); + underrun_optimizer.Update(0); + EXPECT_EQ(underrun_optimizer.GetOptimalDelayMs(), 500); +} + +} // namespace webrtc diff --git a/modules/audio_coding/test/Channel.cc b/modules/audio_coding/test/Channel.cc index d7bd6a968b..ec0eccb5d9 100644 --- a/modules/audio_coding/test/Channel.cc +++ b/modules/audio_coding/test/Channel.cc @@ -10,7 +10,6 @@ #include "modules/audio_coding/test/Channel.h" -#include #include diff --git a/modules/audio_coding/test/EncodeDecodeTest.h b/modules/audio_coding/test/EncodeDecodeTest.h index c96a4d69f4..fbc42fbe0e 100644 --- a/modules/audio_coding/test/EncodeDecodeTest.h +++ b/modules/audio_coding/test/EncodeDecodeTest.h @@ -28,11 +28,11 @@ class TestPacketization : public AudioPacketizationCallback { public: TestPacketization(RTPStream* rtpStream, uint16_t frequency); ~TestPacketization(); - int32_t SendData(const AudioFrameType frameType, - const uint8_t payloadType, - const uint32_t timeStamp, + int32_t SendData(AudioFrameType frameType, + uint8_t payloadType, + uint32_t timeStamp, const uint8_t* payloadData, - const size_t payloadSize, + size_t payloadSize, int64_t absolute_capture_timestamp_ms) override; private: diff --git a/modules/audio_coding/test/RTPFile.h b/modules/audio_coding/test/RTPFile.h index a3d1520922..a9f574bab0 100644 --- a/modules/audio_coding/test/RTPFile.h +++ b/modules/audio_coding/test/RTPFile.h @@ -25,11 +25,11 @@ class RTPStream { public: virtual ~RTPStream() {} - virtual void Write(const uint8_t payloadType, - const uint32_t timeStamp, - const int16_t seqNo, + virtual void Write(uint8_t payloadType, + uint32_t timeStamp, + int16_t seqNo, const uint8_t* payloadData, - const size_t payloadSize, + size_t payloadSize, uint32_t frequency) = 0; // Returns the packet's payload size.
Zero should be treated as an @@ -75,11 +75,11 @@ class RTPBuffer : public RTPStream { ~RTPBuffer() = default; - void Write(const uint8_t payloadType, - const uint32_t timeStamp, - const int16_t seqNo, + void Write(uint8_t payloadType, + uint32_t timeStamp, + int16_t seqNo, const uint8_t* payloadData, - const size_t payloadSize, + size_t payloadSize, uint32_t frequency) override; size_t Read(RTPHeader* rtp_header, @@ -108,11 +108,11 @@ class RTPFile : public RTPStream { void ReadHeader(); - void Write(const uint8_t payloadType, - const uint32_t timeStamp, - const int16_t seqNo, + void Write(uint8_t payloadType, + uint32_t timeStamp, + int16_t seqNo, const uint8_t* payloadData, - const size_t payloadSize, + size_t payloadSize, uint32_t frequency) override; size_t Read(RTPHeader* rtp_header, diff --git a/modules/audio_coding/test/TestAllCodecs.cc b/modules/audio_coding/test/TestAllCodecs.cc index 9cb37524d0..e93df346f1 100644 --- a/modules/audio_coding/test/TestAllCodecs.cc +++ b/modules/audio_coding/test/TestAllCodecs.cc @@ -320,7 +320,7 @@ void TestAllCodecs::RegisterSendCodec(char side, // If G.722, store half the size to compensate for the timestamp bug in the // RFC for G.722. // If iSAC runs in adaptive mode, packet size in samples can change on the - // fly, so we exclude this test by setting |packet_size_samples_| to -1. + // fly, so we exclude this test by setting `packet_size_samples_` to -1. int clockrate_hz = sampling_freq_hz; size_t num_channels = 1; if (absl::EqualsIgnoreCase(codec_name, "G722")) { diff --git a/modules/audio_coding/test/TestStereo.cc b/modules/audio_coding/test/TestStereo.cc index 61d27aab18..599fafb602 100644 --- a/modules/audio_coding/test/TestStereo.cc +++ b/modules/audio_coding/test/TestStereo.cc @@ -508,9 +508,9 @@ void TestStereo::Run(TestPackStereo* channel, in_file_stereo_->FastForward(100); in_file_mono_->FastForward(100); - while (1) { - // Simulate packet loss by setting |packet_loss_| to "true" in - // |percent_loss| percent of the loops. + while (true) { + // Simulate packet loss by setting `packet_loss_` to "true" in + // `percent_loss` percent of the loops. if (percent_loss > 0) { if (counter_ == floor((100 / percent_loss) + 0.5)) { counter_ = 0; diff --git a/modules/audio_coding/test/TestStereo.h b/modules/audio_coding/test/TestStereo.h index 3ee4dbf594..4c50a4b555 100644 --- a/modules/audio_coding/test/TestStereo.h +++ b/modules/audio_coding/test/TestStereo.h @@ -31,11 +31,11 @@ class TestPackStereo : public AudioPacketizationCallback { void RegisterReceiverACM(AudioCodingModule* acm); - int32_t SendData(const AudioFrameType frame_type, - const uint8_t payload_type, - const uint32_t timestamp, + int32_t SendData(AudioFrameType frame_type, + uint8_t payload_type, + uint32_t timestamp, const uint8_t* payload_data, - const size_t payload_size, + size_t payload_size, int64_t absolute_capture_timestamp_ms) override; uint16_t payload_size(); diff --git a/modules/audio_coding/test/TestVADDTX.h b/modules/audio_coding/test/TestVADDTX.h index cce802d9f0..9c6791a224 100644 --- a/modules/audio_coding/test/TestVADDTX.h +++ b/modules/audio_coding/test/TestVADDTX.h @@ -23,7 +23,7 @@ namespace webrtc { // This class records the frame type, and delegates actual sending to the -// |next_| AudioPacketizationCallback. +// `next_` AudioPacketizationCallback. class MonitoringAudioPacketizationCallback : public AudioPacketizationCallback { public: explicit MonitoringAudioPacketizationCallback( @@ -67,9 +67,9 @@ class TestVadDtx { // the expectation. 
Saves result to a file. // expects[x] means // -1 : do not care, - 0 : there have been no packets of type |x|, - 1 : there have been packets of type |x|, - with |x| indicates the following packet types + 0 : there have been no packets of type `x`, + 1 : there have been packets of type `x`, + where `x` indicates the following packet types // 0 - kEmptyFrame // 1 - kAudioFrameSpeech // 2 - kAudioFrameCN diff --git a/modules/audio_coding/test/TwoWayCommunication.h b/modules/audio_coding/test/TwoWayCommunication.h index 7d0cdb9566..b7eb9e5583 100644 --- a/modules/audio_coding/test/TwoWayCommunication.h +++ b/modules/audio_coding/test/TwoWayCommunication.h @@ -31,9 +31,9 @@ class TwoWayCommunication { private: void SetUpAutotest(AudioEncoderFactory* const encoder_factory, const SdpAudioFormat& format1, - const int payload_type1, + int payload_type1, const SdpAudioFormat& format2, - const int payload_type2); + int payload_type2); std::unique_ptr<AudioCodingModule> _acmA; std::unique_ptr<AudioCodingModule> _acmB; diff --git a/modules/audio_coding/test/iSACTest.cc b/modules/audio_coding/test/iSACTest.cc index 94a6889ee7..52f5a6ad4f 100644 --- a/modules/audio_coding/test/iSACTest.cc +++ b/modules/audio_coding/test/iSACTest.cc @@ -257,11 +257,6 @@ void ISACTest::EncodeDecode(int testNr, wbISACConfig), kISAC16kPayloadType)); - bool adaptiveMode = false; - if ((swbISACConfig.currentRateBitPerSec == -1) || - (wbISACConfig.currentRateBitPerSec == -1)) { - adaptiveMode = true; - } _myTimer.Reset(); _channel_A2B->ResetStats(); _channel_B2A->ResetStats(); diff --git a/modules/audio_coding/test/opus_test.cc b/modules/audio_coding/test/opus_test.cc index 5f70c0328d..6822bc3d72 100644 --- a/modules/audio_coding/test/opus_test.cc +++ b/modules/audio_coding/test/opus_test.cc @@ -277,8 +277,8 @@ void OpusTest::Run(TestPackStereo* channel, ASSERT_GE(bitstream_len_byte_int, 0); bitstream_len_byte = static_cast<size_t>(bitstream_len_byte_int); - // Simulate packet loss by setting |packet_loss_| to "true" in - // |percent_loss| percent of the loops. + // Simulate packet loss by setting `packet_loss_` to "true" in + // `percent_loss` percent of the loops. // TODO(tlegrand): Move handling of loss simulation to TestPackStereo.
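Note that the loss pattern used by these Run() loops is deterministic rather than random: every round(100 / percent_loss)-th iteration is marked lost, as the code below shows. A standalone sketch of the same rule (assuming an integer percent_loss, mirroring the expression in the tests):

#include <cmath>
#include <cstdio>

int main() {
  const int percent_loss = 30;
  // Same expression as the test code; with integer percent_loss the
  // division truncates before the rounding term is added.
  const int period = static_cast<int>(std::floor((100 / percent_loss) + 0.5));
  int counter = 0;
  int lost = 0;
  for (int i = 0; i < 100; ++i) {
    if (++counter == period) {
      counter = 0;
      ++lost;  // This iteration simulates a lost packet.
    }
  }
  std::printf("%d of 100 packets dropped\n", lost);  // Prints: 33 of 100.
  return 0;
}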
if (percent_loss > 0) { if (counter_ == floor((100 / percent_loss) + 0.5)) { diff --git a/modules/audio_device/BUILD.gn b/modules/audio_device/BUILD.gn index 5d6a1d82fc..d441479b33 100644 --- a/modules/audio_device/BUILD.gn +++ b/modules/audio_device/BUILD.gn @@ -442,7 +442,7 @@ if (!build_with_chromium && is_android) { ] deps = [ "../../rtc_base:base_java", - "//third_party/android_deps:com_android_support_support_annotations_java", + "//third_party/androidx:androidx_annotation_annotation_java", ] } } diff --git a/modules/audio_device/android/aaudio_player.cc b/modules/audio_device/android/aaudio_player.cc index 6d310ed8e4..5257b2ba1b 100644 --- a/modules/audio_device/android/aaudio_player.cc +++ b/modules/audio_device/android/aaudio_player.cc @@ -27,19 +27,19 @@ enum AudioDeviceMessageType : uint32_t { AAudioPlayer::AAudioPlayer(AudioManager* audio_manager) : main_thread_(rtc::Thread::Current()), aaudio_(audio_manager, AAUDIO_DIRECTION_OUTPUT, this) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; thread_checker_aaudio_.Detach(); } AAudioPlayer::~AAudioPlayer() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK_RUN_ON(&main_thread_checker_); Terminate(); - RTC_LOG(INFO) << "#detected underruns: " << underrun_count_; + RTC_LOG(LS_INFO) << "#detected underruns: " << underrun_count_; } int AAudioPlayer::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK_RUN_ON(&main_thread_checker_); if (aaudio_.audio_parameters().channels() == 2) { RTC_DLOG(LS_WARNING) << "Stereo mode is enabled"; @@ -48,14 +48,14 @@ int AAudioPlayer::Init() { } int AAudioPlayer::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK_RUN_ON(&main_thread_checker_); StopPlayout(); return 0; } int AAudioPlayer::InitPlayout() { - RTC_LOG(INFO) << "InitPlayout"; + RTC_LOG(LS_INFO) << "InitPlayout"; RTC_DCHECK_RUN_ON(&main_thread_checker_); RTC_DCHECK(!initialized_); RTC_DCHECK(!playing_); @@ -72,7 +72,7 @@ bool AAudioPlayer::PlayoutIsInitialized() const { } int AAudioPlayer::StartPlayout() { - RTC_LOG(INFO) << "StartPlayout"; + RTC_LOG(LS_INFO) << "StartPlayout"; RTC_DCHECK_RUN_ON(&main_thread_checker_); RTC_DCHECK(!playing_); if (!initialized_) { @@ -93,7 +93,7 @@ int AAudioPlayer::StartPlayout() { } int AAudioPlayer::StopPlayout() { - RTC_LOG(INFO) << "StopPlayout"; + RTC_LOG(LS_INFO) << "StopPlayout"; RTC_DCHECK_RUN_ON(&main_thread_checker_); if (!initialized_ || !playing_) { return 0; @@ -114,7 +114,7 @@ bool AAudioPlayer::Playing() const { } void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_DLOG(INFO) << "AttachAudioBuffer"; + RTC_DLOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK_RUN_ON(&main_thread_checker_); audio_device_buffer_ = audioBuffer; const AudioParameters audio_parameters = aaudio_.audio_parameters(); @@ -142,7 +142,7 @@ void AAudioPlayer::OnErrorCallback(aaudio_result_t error) { if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) { // The stream is disconnected and any attempt to use it will return // AAUDIO_ERROR_DISCONNECTED. - RTC_LOG(WARNING) << "Output stream disconnected"; + RTC_LOG(LS_WARNING) << "Output stream disconnected"; // AAudio documentation states: "You should not close or reopen the stream // from the callback, use another thread instead". A message is therefore // sent to the main thread to do the restart operation. 
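Both the AAudio player and recorder defer the actual restart to the thread that owns the stream, since AAudio forbids closing or reopening a stream from its own callback. A minimal, self-contained sketch of that hand-off, using plain std:: primitives as a stand-in for WebRTC's rtc::Thread message passing (all names here are illustrative):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

// The error callback only records that a restart is needed; the owning
// thread notices and performs the heavy stop/close/reopen work.
class StreamSupervisor {
 public:
  // Would be invoked from the real-time callback; does no stream work here.
  void SignalDisconnected() {
    std::lock_guard<std::mutex> lock(mutex_);
    restart_pending_ = true;
    cv_.notify_one();
  }
  // Runs on the owning ("main") thread and blocks until a restart is due.
  void RunOwnerThread() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return restart_pending_; });
    restart_pending_ = false;
    std::puts("stop + close + reopen + start, off the callback thread");
  }
 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  bool restart_pending_ = false;
};

int main() {
  StreamSupervisor supervisor;
  std::thread owner([&] { supervisor.RunOwnerThread(); });
  supervisor.SignalDisconnected();  // what OnErrorCallback would trigger
  owner.join();
}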
@@ -157,9 +157,9 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data, // Log device id in first data callback to ensure that a valid device is // utilized. if (first_data_callback_) { - RTC_LOG(INFO) << "--- First output data callback: " - "device id=" - << aaudio_.device_id(); + RTC_LOG(LS_INFO) << "--- First output data callback: " + "device id=" + << aaudio_.device_id(); first_data_callback_ = false; } @@ -179,12 +179,12 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data, latency_millis_ = aaudio_.EstimateLatencyMillis(); // TODO(henrika): use for development only. if (aaudio_.frames_written() % (1000 * aaudio_.frames_per_burst()) == 0) { - RTC_DLOG(INFO) << "output latency: " << latency_millis_ - << ", num_frames: " << num_frames; + RTC_DLOG(LS_INFO) << "output latency: " << latency_millis_ + << ", num_frames: " << num_frames; } // Read audio data from the WebRTC source using the FineAudioBuffer object - // and write that data into |audio_data| to be played out by AAudio. + // and write that data into `audio_data` to be played out by AAudio. // Prime output with zeros during a short initial phase to avoid distortion. // TODO(henrika): do more work to figure out of if the initial forced silence // period is really needed. @@ -215,7 +215,7 @@ void AAudioPlayer::OnMessage(rtc::Message* msg) { void AAudioPlayer::HandleStreamDisconnected() { RTC_DCHECK_RUN_ON(&main_thread_checker_); - RTC_DLOG(INFO) << "HandleStreamDisconnected"; + RTC_DLOG(LS_INFO) << "HandleStreamDisconnected"; if (!initialized_ || !playing_) { return; } diff --git a/modules/audio_device/android/aaudio_player.h b/modules/audio_device/android/aaudio_player.h index 9e9182aed8..4bf3ee3bc0 100644 --- a/modules/audio_device/android/aaudio_player.h +++ b/modules/audio_device/android/aaudio_player.h @@ -76,8 +76,8 @@ class AAudioPlayer final : public AAudioObserverInterface, protected: // AAudioObserverInterface implementation. - // For an output stream, this function should render and write |num_frames| - // of data in the streams current data format to the |audio_data| buffer. + // For an output stream, this function should render and write `num_frames` + // of data in the streams current data format to the `audio_data` buffer. // Called on a real-time thread owned by AAudio. 
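OnDataCallback, declared just below, is where aaudio_player.cc applies the zero-priming described above: the first few callbacks emit silence before real samples flow. A sketch of that shape, assuming 16-bit mono PCM; the function names and the priming length are assumptions, with a placeholder standing in for the FineAudioBuffer read path.

#include <cstdint>
#include <cstring>

// Placeholder for the FineAudioBuffer read path (hypothetical).
void ReadFromSource(int16_t* destination, int32_t num_frames) {
  std::memset(destination, 0, num_frames * sizeof(int16_t));
}

// Fills the AAudio-owned buffer; the first few callbacks play out zeros.
void FillPlayoutBuffer(void* audio_data, int32_t num_frames,
                       int& callbacks_seen) {
  auto* pcm = static_cast<int16_t*>(audio_data);
  constexpr int kPrimingCallbacks = 5;  // assumed length of the silent phase
  if (callbacks_seen++ < kPrimingCallbacks) {
    std::memset(pcm, 0, num_frames * sizeof(int16_t));  // prime with zeros
  } else {
    ReadFromSource(pcm, num_frames);  // real audio from WebRTC
  }
}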
aaudio_data_callback_result_t OnDataCallback(void* audio_data, int32_t num_frames) override; diff --git a/modules/audio_device/android/aaudio_recorder.cc b/modules/audio_device/android/aaudio_recorder.cc index 95f1a1a3cd..4757cf8cf0 100644 --- a/modules/audio_device/android/aaudio_recorder.cc +++ b/modules/audio_device/android/aaudio_recorder.cc @@ -28,19 +28,19 @@ enum AudioDeviceMessageType : uint32_t { AAudioRecorder::AAudioRecorder(AudioManager* audio_manager) : main_thread_(rtc::Thread::Current()), aaudio_(audio_manager, AAUDIO_DIRECTION_INPUT, this) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; thread_checker_aaudio_.Detach(); } AAudioRecorder::~AAudioRecorder() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Terminate(); - RTC_LOG(INFO) << "detected owerflows: " << overflow_count_; + RTC_LOG(LS_INFO) << "detected owerflows: " << overflow_count_; } int AAudioRecorder::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); if (aaudio_.audio_parameters().channels() == 2) { RTC_DLOG(LS_WARNING) << "Stereo mode is enabled"; @@ -49,14 +49,14 @@ int AAudioRecorder::Init() { } int AAudioRecorder::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK(thread_checker_.IsCurrent()); StopRecording(); return 0; } int AAudioRecorder::InitRecording() { - RTC_LOG(INFO) << "InitRecording"; + RTC_LOG(LS_INFO) << "InitRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); RTC_DCHECK(!recording_); @@ -68,7 +68,7 @@ int AAudioRecorder::InitRecording() { } int AAudioRecorder::StartRecording() { - RTC_LOG(INFO) << "StartRecording"; + RTC_LOG(LS_INFO) << "StartRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(initialized_); RTC_DCHECK(!recording_); @@ -85,7 +85,7 @@ int AAudioRecorder::StartRecording() { } int AAudioRecorder::StopRecording() { - RTC_LOG(INFO) << "StopRecording"; + RTC_LOG(LS_INFO) << "StopRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_ || !recording_) { return 0; @@ -100,7 +100,7 @@ int AAudioRecorder::StopRecording() { } void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_LOG(INFO) << "AttachAudioBuffer"; + RTC_LOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = audioBuffer; const AudioParameters audio_parameters = aaudio_.audio_parameters(); @@ -114,19 +114,19 @@ void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { } int AAudioRecorder::EnableBuiltInAEC(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInAEC: " << enable; + RTC_LOG(LS_INFO) << "EnableBuiltInAEC: " << enable; RTC_LOG(LS_ERROR) << "Not implemented"; return -1; } int AAudioRecorder::EnableBuiltInAGC(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInAGC: " << enable; + RTC_LOG(LS_INFO) << "EnableBuiltInAGC: " << enable; RTC_LOG(LS_ERROR) << "Not implemented"; return -1; } int AAudioRecorder::EnableBuiltInNS(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInNS: " << enable; + RTC_LOG(LS_INFO) << "EnableBuiltInNS: " << enable; RTC_LOG(LS_ERROR) << "Not implemented"; return -1; } @@ -137,7 +137,7 @@ void AAudioRecorder::OnErrorCallback(aaudio_result_t error) { if (aaudio_.stream_state() == AAUDIO_STREAM_STATE_DISCONNECTED) { // The stream is disconnected and any attempt to use it will return // AAUDIO_ERROR_DISCONNECTED.. 
- RTC_LOG(WARNING) << "Input stream disconnected => restart is required"; + RTC_LOG(LS_WARNING) << "Input stream disconnected => restart is required"; // AAudio documentation states: "You should not close or reopen the stream // from the callback, use another thread instead". A message is therefore // sent to the main thread to do the restart operation. @@ -146,7 +146,7 @@ void AAudioRecorder::OnErrorCallback(aaudio_result_t error) { } } -// Read and process |num_frames| of data from the |audio_data| buffer. +// Read and process `num_frames` of data from the `audio_data` buffer. // TODO(henrika): possibly add trace here to be included in systrace. // See https://developer.android.com/studio/profile/systrace-commandline.html. aaudio_data_callback_result_t AAudioRecorder::OnDataCallback( @@ -154,14 +154,14 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback( int32_t num_frames) { // TODO(henrika): figure out why we sometimes hit this one. // RTC_DCHECK(thread_checker_aaudio_.IsCurrent()); - // RTC_LOG(INFO) << "OnDataCallback: " << num_frames; + // RTC_LOG(LS_INFO) << "OnDataCallback: " << num_frames; // Drain the input buffer at first callback to ensure that it does not // contain any old data. Will also ensure that the lowest possible latency // is obtained. if (first_data_callback_) { - RTC_LOG(INFO) << "--- First input data callback: " - "device id=" - << aaudio_.device_id(); + RTC_LOG(LS_INFO) << "--- First input data callback: " + "device id=" + << aaudio_.device_id(); aaudio_.ClearInputStream(audio_data, num_frames); first_data_callback_ = false; } @@ -177,10 +177,10 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback( latency_millis_ = aaudio_.EstimateLatencyMillis(); // TODO(henrika): use for development only. if (aaudio_.frames_read() % (1000 * aaudio_.frames_per_burst()) == 0) { - RTC_DLOG(INFO) << "input latency: " << latency_millis_ - << ", num_frames: " << num_frames; + RTC_DLOG(LS_INFO) << "input latency: " << latency_millis_ + << ", num_frames: " << num_frames; } - // Copy recorded audio in |audio_data| to the WebRTC sink using the + // Copy recorded audio in `audio_data` to the WebRTC sink using the // FineAudioBuffer object. fine_audio_buffer_->DeliverRecordedData( rtc::MakeArrayView(static_cast(audio_data), @@ -204,7 +204,7 @@ void AAudioRecorder::OnMessage(rtc::Message* msg) { void AAudioRecorder::HandleStreamDisconnected() { RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_LOG(INFO) << "HandleStreamDisconnected"; + RTC_LOG(LS_INFO) << "HandleStreamDisconnected"; if (!initialized_ || !recording_) { return; } diff --git a/modules/audio_device/android/aaudio_recorder.h b/modules/audio_device/android/aaudio_recorder.h index bbf2cacf9b..d0ad6be43d 100644 --- a/modules/audio_device/android/aaudio_recorder.h +++ b/modules/audio_device/android/aaudio_recorder.h @@ -69,8 +69,8 @@ class AAudioRecorder : public AAudioObserverInterface, protected: // AAudioObserverInterface implementation. - // For an input stream, this function should read |num_frames| of recorded - // data, in the stream's current data format, from the |audio_data| buffer. + // For an input stream, this function should read `num_frames` of recorded + // data, in the stream's current data format, from the `audio_data` buffer. // Called on a real-time thread owned by AAudio. 
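The development-only latency logging in aaudio_recorder.cc above fires once every 1000 bursts: frames_read() grows by frames_per_burst() per callback, so the modulo test is true every 1000 callbacks. With an assumed 192-frame burst at 48 kHz, that is 192000 frames, i.e. one log line about every 4 seconds. The same arithmetic, runnable:

#include <cstdint>
#include <cstdio>

// How often the `frames % (1000 * burst) == 0` guard fires, in seconds.
int main() {
  const int64_t frames_per_burst = 192;  // assumed burst size
  const int sample_rate_hz = 48000;      // assumed sample rate
  const int64_t frames_between_logs = 1000 * frames_per_burst;
  std::printf("log every %lld frames = %.1f s\n",
              static_cast<long long>(frames_between_logs),
              static_cast<double>(frames_between_logs) / sample_rate_hz);
}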
aaudio_data_callback_result_t OnDataCallback(void* audio_data, int32_t num_frames) override; diff --git a/modules/audio_device/android/aaudio_wrapper.cc b/modules/audio_device/android/aaudio_wrapper.cc index ab1278436e..3d824b5c57 100644 --- a/modules/audio_device/android/aaudio_wrapper.cc +++ b/modules/audio_device/android/aaudio_wrapper.cc @@ -91,8 +91,8 @@ void ErrorCallback(AAudioStream* stream, aaudio_result_t error) { RTC_DCHECK(user_data); AAudioWrapper* aaudio_wrapper = reinterpret_cast(user_data); - RTC_LOG(WARNING) << "ErrorCallback: " - << DirectionToString(aaudio_wrapper->direction()); + RTC_LOG(LS_WARNING) << "ErrorCallback: " + << DirectionToString(aaudio_wrapper->direction()); RTC_DCHECK(aaudio_wrapper->observer()); aaudio_wrapper->observer()->OnErrorCallback(error); } @@ -134,23 +134,23 @@ AAudioWrapper::AAudioWrapper(AudioManager* audio_manager, aaudio_direction_t direction, AAudioObserverInterface* observer) : direction_(direction), observer_(observer) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_DCHECK(observer_); direction_ == AAUDIO_DIRECTION_OUTPUT ? audio_parameters_ = audio_manager->GetPlayoutAudioParameters() : audio_parameters_ = audio_manager->GetRecordAudioParameters(); aaudio_thread_checker_.Detach(); - RTC_LOG(INFO) << audio_parameters_.ToString(); + RTC_LOG(LS_INFO) << audio_parameters_.ToString(); } AAudioWrapper::~AAudioWrapper() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!stream_); } bool AAudioWrapper::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); // Creates a stream builder which can be used to open an audio stream. ScopedStreamBuilder builder; @@ -174,7 +174,7 @@ bool AAudioWrapper::Init() { } bool AAudioWrapper::Start() { - RTC_LOG(INFO) << "Start"; + RTC_LOG(LS_INFO) << "Start"; RTC_DCHECK(thread_checker_.IsCurrent()); // TODO(henrika): this state check might not be needed. aaudio_stream_state_t current_state = AAudioStream_getState(stream_); @@ -190,7 +190,7 @@ bool AAudioWrapper::Start() { } bool AAudioWrapper::Stop() { - RTC_LOG(INFO) << "Stop: " << DirectionToString(direction()); + RTC_LOG(LS_INFO) << "Stop: " << DirectionToString(direction()); RTC_DCHECK(thread_checker_.IsCurrent()); // Asynchronous request for the stream to stop. RETURN_ON_ERROR(AAudioStream_requestStop(stream_), false); @@ -240,7 +240,7 @@ double AAudioWrapper::EstimateLatencyMillis() const { // Returns new buffer size or a negative error value if buffer size could not // be increased. 
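The negative-on-failure contract just described reduces to a small clamp rule: grow the buffer after an underrun, but never past the stream's capacity. A sketch with illustrative names; the real method also issues AAudioStream_setBufferSizeInFrames and re-checks the value the stream actually accepted.

#include <cstdint>

// Next buffer size after an underrun. Returns -1 when the buffer is
// already as large as the stream allows.
int32_t NextBufferSizeInFrames(int32_t current, int32_t increment,
                               int32_t max_capacity) {
  const int32_t requested = current + increment;
  return requested > max_capacity ? -1 : requested;
}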
bool AAudioWrapper::IncreaseOutputBufferSize() { - RTC_LOG(INFO) << "IncreaseBufferSize"; + RTC_LOG(LS_INFO) << "IncreaseBufferSize"; RTC_DCHECK(stream_); RTC_DCHECK(aaudio_thread_checker_.IsCurrent()); RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_OUTPUT); @@ -255,20 +255,20 @@ bool AAudioWrapper::IncreaseOutputBufferSize() { << ") is higher than max: " << max_buffer_size; return false; } - RTC_LOG(INFO) << "Updating buffer size to: " << buffer_size - << " (max=" << max_buffer_size << ")"; + RTC_LOG(LS_INFO) << "Updating buffer size to: " << buffer_size + << " (max=" << max_buffer_size << ")"; buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size); if (buffer_size < 0) { RTC_LOG(LS_ERROR) << "Failed to change buffer size: " << AAudio_convertResultToText(buffer_size); return false; } - RTC_LOG(INFO) << "Buffer size changed to: " << buffer_size; + RTC_LOG(LS_INFO) << "Buffer size changed to: " << buffer_size; return true; } void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) { - RTC_LOG(INFO) << "ClearInputStream"; + RTC_LOG(LS_INFO) << "ClearInputStream"; RTC_DCHECK(stream_); RTC_DCHECK(aaudio_thread_checker_.IsCurrent()); RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_INPUT); @@ -357,7 +357,7 @@ int64_t AAudioWrapper::frames_read() const { } void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) { - RTC_LOG(INFO) << "SetStreamConfiguration"; + RTC_LOG(LS_INFO) << "SetStreamConfiguration"; RTC_DCHECK(builder); RTC_DCHECK(thread_checker_.IsCurrent()); // Request usage of default primary output/input device. @@ -390,7 +390,7 @@ void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) { } bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) { - RTC_LOG(INFO) << "OpenStream"; + RTC_LOG(LS_INFO) << "OpenStream"; RTC_DCHECK(builder); AAudioStream* stream = nullptr; RETURN_ON_ERROR(AAudioStreamBuilder_openStream(builder, &stream), false); @@ -400,7 +400,7 @@ bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) { } void AAudioWrapper::CloseStream() { - RTC_LOG(INFO) << "CloseStream"; + RTC_LOG(LS_INFO) << "CloseStream"; RTC_DCHECK(stream_); LOG_ON_ERROR(AAudioStream_close(stream_)); stream_ = nullptr; @@ -419,16 +419,16 @@ void AAudioWrapper::LogStreamConfiguration() { ss << ", direction=" << DirectionToString(direction()); ss << ", device id=" << AAudioStream_getDeviceId(stream_); ss << ", frames per callback=" << frames_per_callback(); - RTC_LOG(INFO) << ss.str(); + RTC_LOG(LS_INFO) << ss.str(); } void AAudioWrapper::LogStreamState() { - RTC_LOG(INFO) << "AAudio stream state: " - << AAudio_convertStreamStateToText(stream_state()); + RTC_LOG(LS_INFO) << "AAudio stream state: " + << AAudio_convertStreamStateToText(stream_state()); } bool AAudioWrapper::VerifyStreamConfiguration() { - RTC_LOG(INFO) << "VerifyStreamConfiguration"; + RTC_LOG(LS_INFO) << "VerifyStreamConfiguration"; RTC_DCHECK(stream_); // TODO(henrika): should we verify device ID as well? if (AAudioStream_getSampleRate(stream_) != audio_parameters().sample_rate()) { @@ -466,16 +466,16 @@ bool AAudioWrapper::VerifyStreamConfiguration() { } bool AAudioWrapper::OptimizeBuffers() { - RTC_LOG(INFO) << "OptimizeBuffers"; + RTC_LOG(LS_INFO) << "OptimizeBuffers"; RTC_DCHECK(stream_); // Maximum number of frames that can be filled without blocking. 
- RTC_LOG(INFO) << "max buffer capacity in frames: " - << buffer_capacity_in_frames(); + RTC_LOG(LS_INFO) << "max buffer capacity in frames: " + << buffer_capacity_in_frames(); // Query the number of frames that the application should read or write at // one time for optimal performance. int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_); - RTC_LOG(INFO) << "frames per burst for optimal performance: " - << frames_per_burst; + RTC_LOG(LS_INFO) << "frames per burst for optimal performance: " + << frames_per_burst; frames_per_burst_ = frames_per_burst; if (direction() == AAUDIO_DIRECTION_INPUT) { // There is no point in calling setBufferSizeInFrames() for input streams @@ -492,7 +492,7 @@ bool AAudioWrapper::OptimizeBuffers() { return false; } // Maximum number of frames that can be filled without blocking. - RTC_LOG(INFO) << "buffer burst size in frames: " << buffer_size; + RTC_LOG(LS_INFO) << "buffer burst size in frames: " << buffer_size; return true; } diff --git a/modules/audio_device/android/audio_device_template.h b/modules/audio_device/android/audio_device_template.h index 3ea248f79e..999c5878c6 100644 --- a/modules/audio_device/android/audio_device_template.h +++ b/modules/audio_device/android/audio_device_template.h @@ -39,22 +39,22 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { output_(audio_manager_), input_(audio_manager_), initialized_(false) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_CHECK(audio_manager); audio_manager_->SetActiveAudioLayer(audio_layer); } - virtual ~AudioDeviceTemplate() { RTC_LOG(INFO) << __FUNCTION__; } + virtual ~AudioDeviceTemplate() { RTC_LOG(LS_INFO) << __FUNCTION__; } int32_t ActiveAudioLayer( AudioDeviceModule::AudioLayer& audioLayer) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; audioLayer = audio_layer_; return 0; } InitStatus Init() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); if (!audio_manager_->Init()) { @@ -74,7 +74,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } int32_t Terminate() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(thread_checker_.IsCurrent()); int32_t err = input_.Terminate(); err |= output_.Terminate(); @@ -85,18 +85,18 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } bool Initialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(thread_checker_.IsCurrent()); return initialized_; } int16_t PlayoutDevices() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 1; } int16_t RecordingDevices() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 1; } @@ -115,7 +115,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t SetPlayoutDevice(uint16_t index) override { // OK to use but it has no effect currently since device selection is // done using Andoid APIs instead. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 0; } @@ -127,7 +127,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t SetRecordingDevice(uint16_t index) override { // OK to use but it has no effect currently since device selection is // done using Andoid APIs instead. 
- RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 0; } @@ -137,41 +137,41 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } int32_t PlayoutIsAvailable(bool& available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; available = true; return 0; } int32_t InitPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.InitPlayout(); } bool PlayoutIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.PlayoutIsInitialized(); } int32_t RecordingIsAvailable(bool& available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; available = true; return 0; } int32_t InitRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return input_.InitRecording(); } bool RecordingIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return input_.RecordingIsInitialized(); } int32_t StartPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!audio_manager_->IsCommunicationModeEnabled()) { - RTC_LOG(WARNING) + RTC_LOG(LS_WARNING) << "The application should use MODE_IN_COMMUNICATION audio mode!"; } return output_.StartPlayout(); @@ -181,20 +181,20 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { // Avoid using audio manger (JNI/Java cost) if playout was inactive. if (!Playing()) return 0; - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; int32_t err = output_.StopPlayout(); return err; } bool Playing() const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; return output_.Playing(); } int32_t StartRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!audio_manager_->IsCommunicationModeEnabled()) { - RTC_LOG(WARNING) + RTC_LOG(LS_WARNING) << "The application should use MODE_IN_COMMUNICATION audio mode!"; } return input_.StartRecording(); @@ -202,7 +202,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { int32_t StopRecording() override { // Avoid using audio manger (JNI/Java cost) if recording was inactive. 
- RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!Recording()) return 0; int32_t err = input_.StopRecording(); @@ -212,47 +212,47 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { bool Recording() const override { return input_.Recording(); } int32_t InitSpeaker() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 0; } bool SpeakerIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return true; } int32_t InitMicrophone() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return 0; } bool MicrophoneIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return true; } int32_t SpeakerVolumeIsAvailable(bool& available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.SpeakerVolumeIsAvailable(available); } int32_t SetSpeakerVolume(uint32_t volume) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.SetSpeakerVolume(volume); } int32_t SpeakerVolume(uint32_t& volume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.SpeakerVolume(volume); } int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.MaxSpeakerVolume(maxVolume); } int32_t MinSpeakerVolume(uint32_t& minVolume) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return output_.MinSpeakerVolume(minVolume); } @@ -299,13 +299,13 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { // Returns true if the audio manager has been configured to support stereo // and false otherwised. Default is mono. int32_t StereoPlayoutIsAvailable(bool& available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; available = audio_manager_->IsStereoPlayoutSupported(); return 0; } int32_t SetStereoPlayout(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; bool available = audio_manager_->IsStereoPlayoutSupported(); // Android does not support changes between mono and stero on the fly. // Instead, the native audio layer is configured via the audio manager @@ -320,13 +320,13 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } int32_t StereoRecordingIsAvailable(bool& available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; available = audio_manager_->IsStereoRecordSupported(); return 0; } int32_t SetStereoRecording(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; bool available = audio_manager_->IsStereoRecordSupported(); // Android does not support changes between mono and stero on the fly. 
// Instead, the native audio layer is configured via the audio manager @@ -336,7 +336,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } int32_t StereoRecording(bool& enabled) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; enabled = audio_manager_->IsStereoRecordSupported(); return 0; } @@ -349,7 +349,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { } void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; output_.AttachAudioBuffer(audioBuffer); input_.AttachAudioBuffer(audioBuffer); } @@ -367,13 +367,13 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { // a "Not Implemented" log will be filed. This non-perfect state will remain // until I have added full support for audio effects based on OpenSL ES APIs. bool BuiltInAECIsAvailable() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return audio_manager_->IsAcousticEchoCancelerSupported(); } // TODO(henrika): add implementation for OpenSL ES based audio as well. int32_t EnableBuiltInAEC(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available"; return input_.EnableBuiltInAEC(enable); } @@ -383,13 +383,13 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { // TODO(henrika): add implementation for OpenSL ES based audio as well. // In addition, see comments for BuiltInAECIsAvailable(). bool BuiltInAGCIsAvailable() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return audio_manager_->IsAutomaticGainControlSupported(); } // TODO(henrika): add implementation for OpenSL ES based audio as well. int32_t EnableBuiltInAGC(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; RTC_CHECK(BuiltInAGCIsAvailable()) << "HW AGC is not available"; return input_.EnableBuiltInAGC(enable); } @@ -399,13 +399,13 @@ class AudioDeviceTemplate : public AudioDeviceGeneric { // TODO(henrika): add implementation for OpenSL ES based audio as well. // In addition, see comments for BuiltInAECIsAvailable(). bool BuiltInNSIsAvailable() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return audio_manager_->IsNoiseSuppressorSupported(); } // TODO(henrika): add implementation for OpenSL ES based audio as well. int32_t EnableBuiltInNS(bool enable) override { - RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available"; return input_.EnableBuiltInNS(enable); } diff --git a/modules/audio_device/android/audio_device_unittest.cc b/modules/audio_device/android/audio_device_unittest.cc index 20c36c7c31..79cd69f2f1 100644 --- a/modules/audio_device/android/audio_device_unittest.cc +++ b/modules/audio_device/android/audio_device_unittest.cc @@ -68,7 +68,7 @@ static const int kFilePlayTimeInSec = 5; static const size_t kBitsPerSample = 16; static const size_t kBytesPerSample = kBitsPerSample / 8; // Run the full-duplex test during this time (unit is in seconds). -// Note that first |kNumIgnoreFirstCallbacks| are ignored. +// Note that first `kNumIgnoreFirstCallbacks` are ignored. 
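The stereo-configuration contract in audio_device_template.h above means SetStereoPlayout()/SetStereoRecording() can only confirm a channel layout that was already configured through the audio manager; Android cannot switch between mono and stereo mid-stream. Sketched as a rule (illustrative only; the actual return logic is not fully shown in this hunk):

// The request can only confirm the preconfigured layout; changing it would
// require reconfiguring the audio manager before streams are (re)started.
int32_t SetStereo(bool enable, bool stereo_is_supported) {
  return (enable == stereo_is_supported) ? 0 : -1;
}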
static const int kFullDuplexTimeInSec = 5; // Wait for the callback sequence to stabilize by ignoring this amount of the // initial callbacks (avoids initial FIFO access). @@ -127,7 +127,7 @@ class FileAudioStream : public AudioStreamInterface { void Write(const void* source, size_t num_frames) override {} // Read samples from file stored in memory (at construction) and copy - // |num_frames| (<=> 10ms) to the |destination| byte buffer. + // `num_frames` (<=> 10ms) to the `destination` byte buffer. void Read(void* destination, size_t num_frames) override { memcpy(destination, static_cast(&file_[file_pos_]), num_frames * sizeof(int16_t)); @@ -171,7 +171,7 @@ class FifoAudioStream : public AudioStreamInterface { ~FifoAudioStream() { Flush(); } - // Allocate new memory, copy |num_frames| samples from |source| into memory + // Allocate new memory, copy `num_frames` samples from `source` into memory // and add pointer to the memory location to end of the list. // Increases the size of the FIFO by one element. void Write(const void* source, size_t num_frames) override { @@ -192,8 +192,8 @@ class FifoAudioStream : public AudioStreamInterface { total_written_elements_ += size; } - // Read pointer to data buffer from front of list, copy |num_frames| of stored - // data into |destination| and delete the utilized memory allocation. + // Read pointer to data buffer from front of list, copy `num_frames` of stored + // data into `destination` and delete the utilized memory allocation. // Decreases the size of the FIFO by one element. void Read(void* destination, size_t num_frames) override { ASSERT_EQ(num_frames, frames_per_buffer_); @@ -251,7 +251,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface { rec_count_(0), pulse_time_(0) {} - // Insert periodic impulses in first two samples of |destination|. + // Insert periodic impulses in first two samples of `destination`. void Read(void* destination, size_t num_frames) override { ASSERT_EQ(num_frames, frames_per_buffer_); if (play_count_ == 0) { @@ -272,14 +272,14 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface { } } - // Detect received impulses in |source|, derive time between transmission and + // Detect received impulses in `source`, derive time between transmission and // detection and add the calculated delay to list of latencies. void Write(const void* source, size_t num_frames) override { ASSERT_EQ(num_frames, frames_per_buffer_); rec_count_++; if (pulse_time_ == 0) { // Avoid detection of new impulse response until a new impulse has - // been transmitted (sets |pulse_time_| to value larger than zero). + // been transmitted (sets `pulse_time_` to value larger than zero). return; } const int16_t* ptr16 = static_cast(source); @@ -298,7 +298,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface { // Total latency is the difference between transmit time and detection // tome plus the extra delay within the buffer in which we detected the // received impulse. It is transmitted at sample 0 but can be received - // at sample N where N > 0. The term |extra_delay| accounts for N and it + // at sample N where N > 0. The term `extra_delay` accounts for N and it // is a value between 0 and 10ms. 
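The total latency recorded by the push_back that follows is the wall-clock gap between transmit and detection plus the impulse's position inside the 10 ms buffer: an impulse found at sample index 240 of a 48 kHz buffer contributes 240 * 1000 / 48000 = 5 ms of extra delay. The same computation, with hypothetical names:

#include <cstdint>

// Total latency: wall-clock gap plus intra-buffer offset of the impulse.
int64_t TotalLatencyMs(int64_t pulse_time_ms, int64_t detect_time_ms,
                       int64_t impulse_index, int sample_rate_hz) {
  const int64_t extra_delay_ms = impulse_index * 1000 / sample_rate_hz;
  return (detect_time_ms - pulse_time_ms) + extra_delay_ms;
}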
latencies_.push_back(now_time - pulse_time_ + extra_delay); pulse_time_ = 0; @@ -892,7 +892,7 @@ TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) { EXPECT_CALL( mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample, record_channels(), - record_sample_rate(), _, 0, 0, false, _)) + record_sample_rate(), _, 0, 0, false, _, _)) .Times(AtLeast(kNumCallbacks)); EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock)); @@ -913,7 +913,7 @@ TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) { EXPECT_CALL( mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample, record_channels(), - record_sample_rate(), _, 0, 0, false, _)) + record_sample_rate(), _, 0, 0, false, _, _)) .Times(AtLeast(kNumCallbacks)); EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock)); StartPlayout(); diff --git a/modules/audio_device/android/audio_manager.cc b/modules/audio_device/android/audio_manager.cc index 9c8137b59e..0b55496619 100644 --- a/modules/audio_device/android/audio_manager.cc +++ b/modules/audio_device/android/audio_manager.cc @@ -33,11 +33,11 @@ AudioManager::JavaAudioManager::JavaAudioManager( is_device_blacklisted_for_open_sles_usage_( native_reg->GetMethodId("isDeviceBlacklistedForOpenSLESUsage", "()Z")) { - RTC_LOG(INFO) << "JavaAudioManager::ctor"; + RTC_LOG(LS_INFO) << "JavaAudioManager::ctor"; } AudioManager::JavaAudioManager::~JavaAudioManager() { - RTC_LOG(INFO) << "JavaAudioManager::~dtor"; + RTC_LOG(LS_INFO) << "JavaAudioManager::~dtor"; } bool AudioManager::JavaAudioManager::Init() { @@ -68,7 +68,7 @@ AudioManager::AudioManager() low_latency_playout_(false), low_latency_record_(false), delay_estimate_in_milliseconds_(0) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_CHECK(j_environment_); JNINativeMethod native_methods[] = { {"nativeCacheAudioParameters", "(IIIZZZZZZZIIJ)V", @@ -83,14 +83,14 @@ AudioManager::AudioManager() } AudioManager::~AudioManager() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Close(); } void AudioManager::SetActiveAudioLayer( AudioDeviceModule::AudioLayer audio_layer) { - RTC_LOG(INFO) << "SetActiveAudioLayer: " << audio_layer; + RTC_LOG(LS_INFO) << "SetActiveAudioLayer: " << audio_layer; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); // Store the currently utilized audio layer. @@ -98,23 +98,23 @@ void AudioManager::SetActiveAudioLayer( // The delay estimate can take one of two fixed values depending on if the // device supports low-latency output or not. However, it is also possible // that the user explicitly selects the high-latency audio path, hence we use - // the selected |audio_layer| here to set the delay estimate. + // the selected `audio_layer` here to set the delay estimate. delay_estimate_in_milliseconds_ = (audio_layer == AudioDeviceModule::kAndroidJavaAudio) ? kHighLatencyModeDelayEstimateInMilliseconds : kLowLatencyModeDelayEstimateInMilliseconds; - RTC_LOG(INFO) << "delay_estimate_in_milliseconds: " - << delay_estimate_in_milliseconds_; + RTC_LOG(LS_INFO) << "delay_estimate_in_milliseconds: " + << delay_estimate_in_milliseconds_; } SLObjectItf AudioManager::GetOpenSLEngine() { - RTC_LOG(INFO) << "GetOpenSLEngine"; + RTC_LOG(LS_INFO) << "GetOpenSLEngine"; RTC_DCHECK(thread_checker_.IsCurrent()); // Only allow usage of OpenSL ES if such an audio layer has been specified. 
if (audio_layer_ != AudioDeviceModule::kAndroidOpenSLESAudio && audio_layer_ != AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio) { - RTC_LOG(INFO) + RTC_LOG(LS_INFO) << "Unable to create OpenSL engine for the current audio layer: " << audio_layer_; return nullptr; @@ -123,7 +123,8 @@ SLObjectItf AudioManager::GetOpenSLEngine() { // If one already has been created, return existing object instead of // creating a new. if (engine_object_.Get() != nullptr) { - RTC_LOG(WARNING) << "The OpenSL ES engine object has already been created"; + RTC_LOG(LS_WARNING) + << "The OpenSL ES engine object has already been created"; return engine_object_.Get(); } // Create the engine object in thread safe mode. @@ -149,7 +150,7 @@ SLObjectItf AudioManager::GetOpenSLEngine() { } bool AudioManager::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); RTC_DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio); @@ -162,7 +163,7 @@ bool AudioManager::Init() { } bool AudioManager::Close() { - RTC_LOG(INFO) << "Close"; + RTC_LOG(LS_INFO) << "Close"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_) return true; @@ -273,7 +274,7 @@ void AudioManager::OnCacheAudioParameters(JNIEnv* env, jboolean a_audio, jint output_buffer_size, jint input_buffer_size) { - RTC_LOG(INFO) + RTC_LOG(LS_INFO) << "OnCacheAudioParameters: " "hardware_aec: " << static_cast(hardware_aec) diff --git a/modules/audio_device/android/audio_record_jni.cc b/modules/audio_device/android/audio_record_jni.cc index a3aa85565a..9d7bf73097 100644 --- a/modules/audio_device/android/audio_record_jni.cc +++ b/modules/audio_device/android/audio_record_jni.cc @@ -34,7 +34,7 @@ class ScopedHistogramTimer { ~ScopedHistogramTimer() { const int64_t life_time_ms = rtc::TimeSince(start_time_ms_); RTC_HISTOGRAM_COUNTS_1000(histogram_name_, life_time_ms); - RTC_LOG(INFO) << histogram_name_ << ": " << life_time_ms; + RTC_LOG(LS_INFO) << histogram_name_ << ": " << life_time_ms; } private: @@ -93,7 +93,7 @@ AudioRecordJni::AudioRecordJni(AudioManager* audio_manager) initialized_(false), recording_(false), audio_device_buffer_(nullptr) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_DCHECK(audio_parameters_.is_valid()); RTC_CHECK(j_environment_); JNINativeMethod native_methods[] = { @@ -115,26 +115,26 @@ AudioRecordJni::AudioRecordJni(AudioManager* audio_manager) } AudioRecordJni::~AudioRecordJni() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Terminate(); } int32_t AudioRecordJni::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); return 0; } int32_t AudioRecordJni::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK(thread_checker_.IsCurrent()); StopRecording(); return 0; } int32_t AudioRecordJni::InitRecording() { - RTC_LOG(INFO) << "InitRecording"; + RTC_LOG(LS_INFO) << "InitRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); RTC_DCHECK(!recording_); @@ -147,7 +147,7 @@ int32_t AudioRecordJni::InitRecording() { return -1; } frames_per_buffer_ = static_cast(frames_per_buffer); - RTC_LOG(INFO) << "frames_per_buffer: " << frames_per_buffer_; + RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_; const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t); RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_, frames_per_buffer_ * bytes_per_frame); @@ 
-157,7 +157,7 @@ int32_t AudioRecordJni::InitRecording() { } int32_t AudioRecordJni::StartRecording() { - RTC_LOG(INFO) << "StartRecording"; + RTC_LOG(LS_INFO) << "StartRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!recording_); if (!initialized_) { @@ -175,7 +175,7 @@ int32_t AudioRecordJni::StartRecording() { } int32_t AudioRecordJni::StopRecording() { - RTC_LOG(INFO) << "StopRecording"; + RTC_LOG(LS_INFO) << "StopRecording"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_ || !recording_) { return 0; @@ -195,24 +195,24 @@ int32_t AudioRecordJni::StopRecording() { } void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_LOG(INFO) << "AttachAudioBuffer"; + RTC_LOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = audioBuffer; const int sample_rate_hz = audio_parameters_.sample_rate(); - RTC_LOG(INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")"; + RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << sample_rate_hz << ")"; audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz); const size_t channels = audio_parameters_.channels(); - RTC_LOG(INFO) << "SetRecordingChannels(" << channels << ")"; + RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")"; audio_device_buffer_->SetRecordingChannels(channels); total_delay_in_milliseconds_ = audio_manager_->GetDelayEstimateInMilliseconds(); RTC_DCHECK_GT(total_delay_in_milliseconds_, 0); - RTC_LOG(INFO) << "total_delay_in_milliseconds: " - << total_delay_in_milliseconds_; + RTC_LOG(LS_INFO) << "total_delay_in_milliseconds: " + << total_delay_in_milliseconds_; } int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInAEC(" << enable << ")"; + RTC_LOG(LS_INFO) << "EnableBuiltInAEC(" << enable << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1; } @@ -223,7 +223,7 @@ int32_t AudioRecordJni::EnableBuiltInAGC(bool enable) { } int32_t AudioRecordJni::EnableBuiltInNS(bool enable) { - RTC_LOG(INFO) << "EnableBuiltInNS(" << enable << ")"; + RTC_LOG(LS_INFO) << "EnableBuiltInNS(" << enable << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); return j_audio_record_->EnableBuiltInNS(enable) ? 0 : -1; } @@ -240,12 +240,12 @@ void JNICALL AudioRecordJni::CacheDirectBufferAddress(JNIEnv* env, void AudioRecordJni::OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer) { - RTC_LOG(INFO) << "OnCacheDirectBufferAddress"; + RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!direct_buffer_address_); direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer); jlong capacity = env->GetDirectBufferCapacity(byte_buffer); - RTC_LOG(INFO) << "direct buffer capacity: " << capacity; + RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity; direct_buffer_capacity_in_bytes_ = static_cast(capacity); } @@ -270,11 +270,11 @@ void AudioRecordJni::OnDataIsRecorded(int length) { audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_, frames_per_buffer_); // We provide one (combined) fixed delay estimate for the APM and use the - // |playDelayMs| parameter only. Components like the AEC only sees the sum - // of |playDelayMs| and |recDelayMs|, hence the distributions does not matter. + // `playDelayMs` parameter only. Components like the AEC only sees the sum + // of `playDelayMs` and `recDelayMs`, hence the distributions does not matter. 
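Because the echo canceller only consumes the sum of the two delays, the SetVQEData call that follows may report the entire estimate as playout delay with zero record delay: (150, 0) and (75, 75) both present 150 ms to the AEC. In code form (illustrative helper):

// What the echo canceller effectively consumes; the split between playout
// and record delay is immaterial as long as the sum is right.
int CombinedDelayMs(int play_delay_ms, int rec_delay_ms) {
  return play_delay_ms + rec_delay_ms;  // (150, 0) == (75, 75) == 150 ms
}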
audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_, 0); if (audio_device_buffer_->DeliverRecordedData() == -1) { - RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed"; + RTC_LOG(LS_INFO) << "AudioDeviceBuffer::DeliverRecordedData failed"; } } diff --git a/modules/audio_device/android/audio_record_jni.h b/modules/audio_device/android/audio_record_jni.h index c445360d6c..66a6a89f41 100644 --- a/modules/audio_device/android/audio_record_jni.h +++ b/modules/audio_device/android/audio_record_jni.h @@ -87,8 +87,8 @@ class AudioRecordJni { private: // Called from Java side so we can cache the address of the Java-manged - // |byte_buffer| in |direct_buffer_address_|. The size of the buffer - // is also stored in |direct_buffer_capacity_in_bytes_|. + // `byte_buffer` in `direct_buffer_address_`. The size of the buffer + // is also stored in `direct_buffer_capacity_in_bytes_`. // This method will be called by the WebRtcAudioRecord constructor, i.e., // on the same thread that this object is created on. static void JNICALL CacheDirectBufferAddress(JNIEnv* env, @@ -98,8 +98,8 @@ class AudioRecordJni { void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer); // Called periodically by the Java based WebRtcAudioRecord object when - // recording has started. Each call indicates that there are |length| new - // bytes recorded in the memory area |direct_buffer_address_| and it is + // recording has started. Each call indicates that there are `length` new + // bytes recorded in the memory area `direct_buffer_address_` and it is // now time to send these to the consumer. // This method is called on a high-priority thread from Java. The name of // the thread is 'AudioRecordThread'. @@ -142,10 +142,10 @@ class AudioRecordJni { // possible values. See audio_common.h for details. int total_delay_in_milliseconds_; - // Cached copy of address to direct audio buffer owned by |j_audio_record_|. + // Cached copy of address to direct audio buffer owned by `j_audio_record_`. void* direct_buffer_address_; - // Number of bytes in the direct audio buffer owned by |j_audio_record_|. + // Number of bytes in the direct audio buffer owned by `j_audio_record_`. size_t direct_buffer_capacity_in_bytes_; // Number audio frames per audio buffer. 
Each audio frame corresponds to diff --git a/modules/audio_device/android/audio_track_jni.cc b/modules/audio_device/android/audio_track_jni.cc index daaeeca1ea..178ccadfdb 100644 --- a/modules/audio_device/android/audio_track_jni.cc +++ b/modules/audio_device/android/audio_track_jni.cc @@ -103,7 +103,7 @@ AudioTrackJni::AudioTrackJni(AudioManager* audio_manager) initialized_(false), playing_(false), audio_device_buffer_(nullptr) { - RTC_LOG(INFO) << "ctor"; + RTC_LOG(LS_INFO) << "ctor"; RTC_DCHECK(audio_parameters_.is_valid()); RTC_CHECK(j_environment_); JNINativeMethod native_methods[] = { @@ -125,26 +125,26 @@ AudioTrackJni::AudioTrackJni(AudioManager* audio_manager) } AudioTrackJni::~AudioTrackJni() { - RTC_LOG(INFO) << "dtor"; + RTC_LOG(LS_INFO) << "dtor"; RTC_DCHECK(thread_checker_.IsCurrent()); Terminate(); } int32_t AudioTrackJni::Init() { - RTC_LOG(INFO) << "Init"; + RTC_LOG(LS_INFO) << "Init"; RTC_DCHECK(thread_checker_.IsCurrent()); return 0; } int32_t AudioTrackJni::Terminate() { - RTC_LOG(INFO) << "Terminate"; + RTC_LOG(LS_INFO) << "Terminate"; RTC_DCHECK(thread_checker_.IsCurrent()); StopPlayout(); return 0; } int32_t AudioTrackJni::InitPlayout() { - RTC_LOG(INFO) << "InitPlayout"; + RTC_LOG(LS_INFO) << "InitPlayout"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!initialized_); RTC_DCHECK(!playing_); @@ -158,7 +158,7 @@ int32_t AudioTrackJni::InitPlayout() { } int32_t AudioTrackJni::StartPlayout() { - RTC_LOG(INFO) << "StartPlayout"; + RTC_LOG(LS_INFO) << "StartPlayout"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!playing_); if (!initialized_) { @@ -175,7 +175,7 @@ int32_t AudioTrackJni::StartPlayout() { } int32_t AudioTrackJni::StopPlayout() { - RTC_LOG(INFO) << "StopPlayout"; + RTC_LOG(LS_INFO) << "StopPlayout"; RTC_DCHECK(thread_checker_.IsCurrent()); if (!initialized_ || !playing_) { return 0; @@ -200,7 +200,7 @@ int AudioTrackJni::SpeakerVolumeIsAvailable(bool& available) { } int AudioTrackJni::SetSpeakerVolume(uint32_t volume) { - RTC_LOG(INFO) << "SetSpeakerVolume(" << volume << ")"; + RTC_LOG(LS_INFO) << "SetSpeakerVolume(" << volume << ")"; RTC_DCHECK(thread_checker_.IsCurrent()); return j_audio_track_->SetStreamVolume(volume) ? 0 : -1; } @@ -220,20 +220,20 @@ int AudioTrackJni::MinSpeakerVolume(uint32_t& min_volume) const { int AudioTrackJni::SpeakerVolume(uint32_t& volume) const { RTC_DCHECK(thread_checker_.IsCurrent()); volume = j_audio_track_->GetStreamVolume(); - RTC_LOG(INFO) << "SpeakerVolume: " << volume; + RTC_LOG(LS_INFO) << "SpeakerVolume: " << volume; return 0; } // TODO(henrika): possibly add stereo support. 
void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - RTC_LOG(INFO) << "AttachAudioBuffer"; + RTC_LOG(LS_INFO) << "AttachAudioBuffer"; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = audioBuffer; const int sample_rate_hz = audio_parameters_.sample_rate(); - RTC_LOG(INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")"; + RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << sample_rate_hz << ")"; audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz); const size_t channels = audio_parameters_.channels(); - RTC_LOG(INFO) << "SetPlayoutChannels(" << channels << ")"; + RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")"; audio_device_buffer_->SetPlayoutChannels(channels); } @@ -249,16 +249,16 @@ void JNICALL AudioTrackJni::CacheDirectBufferAddress(JNIEnv* env, void AudioTrackJni::OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer) { - RTC_LOG(INFO) << "OnCacheDirectBufferAddress"; + RTC_LOG(LS_INFO) << "OnCacheDirectBufferAddress"; RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(!direct_buffer_address_); direct_buffer_address_ = env->GetDirectBufferAddress(byte_buffer); jlong capacity = env->GetDirectBufferCapacity(byte_buffer); - RTC_LOG(INFO) << "direct buffer capacity: " << capacity; + RTC_LOG(LS_INFO) << "direct buffer capacity: " << capacity; direct_buffer_capacity_in_bytes_ = static_cast(capacity); const size_t bytes_per_frame = audio_parameters_.channels() * sizeof(int16_t); frames_per_buffer_ = direct_buffer_capacity_in_bytes_ / bytes_per_frame; - RTC_LOG(INFO) << "frames_per_buffer: " << frames_per_buffer_; + RTC_LOG(LS_INFO) << "frames_per_buffer: " << frames_per_buffer_; } JNI_FUNCTION_ALIGN diff --git a/modules/audio_device/android/audio_track_jni.h b/modules/audio_device/android/audio_track_jni.h index 62bcba42b0..7eb69082b4 100644 --- a/modules/audio_device/android/audio_track_jni.h +++ b/modules/audio_device/android/audio_track_jni.h @@ -88,8 +88,8 @@ class AudioTrackJni { private: // Called from Java side so we can cache the address of the Java-manged - // |byte_buffer| in |direct_buffer_address_|. The size of the buffer - // is also stored in |direct_buffer_capacity_in_bytes_|. + // `byte_buffer` in `direct_buffer_address_`. The size of the buffer + // is also stored in `direct_buffer_capacity_in_bytes_`. // Called on the same thread as the creating thread. static void JNICALL CacheDirectBufferAddress(JNIEnv* env, jobject obj, @@ -98,8 +98,8 @@ class AudioTrackJni { void OnCacheDirectBufferAddress(JNIEnv* env, jobject byte_buffer); // Called periodically by the Java based WebRtcAudioTrack object when - // playout has started. Each call indicates that |length| new bytes should - // be written to the memory area |direct_buffer_address_| for playout. + // playout has started. Each call indicates that `length` new bytes should + // be written to the memory area `direct_buffer_address_` for playout. // This method is called on a high-priority thread from Java. The name of // the thread is 'AudioTrackThread'. static void JNICALL GetPlayoutData(JNIEnv* env, @@ -133,10 +133,10 @@ class AudioTrackJni { // AudioManager. const AudioParameters audio_parameters_; - // Cached copy of address to direct audio buffer owned by |j_audio_track_|. + // Cached copy of address to direct audio buffer owned by `j_audio_track_`. void* direct_buffer_address_; - // Number of bytes in the direct audio buffer owned by |j_audio_track_|. + // Number of bytes in the direct audio buffer owned by `j_audio_track_`. 
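OnCacheDirectBufferAddress() above derives the frame count from the Java-allocated direct buffer's byte capacity: with 16-bit PCM, frames = capacity / (channels * 2). For example, a 10 ms mono buffer at 48 kHz is 480 frames, i.e. 960 bytes. The same derivation as a helper (name is illustrative):

#include <cstddef>
#include <cstdint>

// Frame count of a direct ByteBuffer, assuming 16-bit PCM samples.
size_t FramesPerBuffer(size_t capacity_bytes, size_t channels) {
  const size_t bytes_per_frame = channels * sizeof(int16_t);  // 2 per channel
  return capacity_bytes / bytes_per_frame;  // 960 bytes mono -> 480 frames
}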
size_t direct_buffer_capacity_in_bytes_; // Number of audio frames per audio buffer. Each audio frame corresponds to diff --git a/modules/audio_device/android/build_info.h b/modules/audio_device/android/build_info.h index 2f270933e1..3647e56649 100644 --- a/modules/audio_device/android/build_info.h +++ b/modules/audio_device/android/build_info.h @@ -64,7 +64,7 @@ class BuildInfo { SdkCode GetSdkVersion(); private: - // Helper method which calls a static getter method with |name| and returns + // Helper method which calls a static getter method with `name` and returns // a string from Java. std::string GetStringFromJava(const char* name); diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java index 5efc813647..11ed669c3e 100644 --- a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java +++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java @@ -15,7 +15,7 @@ import android.media.audiofx.AudioEffect.Descriptor; import android.media.audiofx.NoiseSuppressor; import android.os.Build; -import android.support.annotation.Nullable; +import androidx.annotation.Nullable; import java.util.List; import java.util.UUID; import org.webrtc.Logging; @@ -23,7 +23,7 @@ // This class wraps control of three different platform effects. Supported // effects are: AcousticEchoCanceler (AEC) and NoiseSuppressor (NS). // Calling enable() will active all effects that are -// supported by the device if the corresponding |shouldEnableXXX| member is set. +// supported by the device if the corresponding `shouldEnableXXX` member is set. public class WebRtcAudioEffects { private static final boolean DEBUG = false; @@ -162,7 +162,7 @@ private WebRtcAudioEffects() { } // Call this method to enable or disable the platform AEC. It modifies - // |shouldEnableAec| which is used in enable() where the actual state + // `shouldEnableAec` which is used in enable() where the actual state // of the AEC effect is modified. Returns true if HW AEC is supported and // false otherwise. public boolean setAEC(boolean enable) { @@ -181,7 +181,7 @@ public boolean setAEC(boolean enable) { } // Call this method to enable or disable the platform NS. It modifies - // |shouldEnableNs| which is used in enable() where the actual state + // `shouldEnableNs` which is used in enable() where the actual state // of the NS effect is modified. Returns true if HW NS is supported and // false otherwise. public boolean setNS(boolean enable) { @@ -269,7 +269,7 @@ public void release() { } } - // Returns true for effect types in |type| that are of "VoIP" types: + // Returns true for effect types in `type` that are of "VoIP" types: // Acoustic Echo Canceler (AEC) or Automatic Gain Control (AGC) or // Noise Suppressor (NS). Note that, an extra check for support is needed // in each comparison since some devices includes effects in the @@ -306,7 +306,7 @@ private static void assertTrue(boolean condition) { } // Returns true if an effect of the specified type is available. Functionally - // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but + // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but // faster as it avoids the expensive OS call to enumerate effects.
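isEffectTypeAvailable(), which follows, avoids the OS round-trip by scanning a cached descriptor list for the effect's UUID. The shape of that optimization, sketched in C++ with stand-in types (the Java code scans the results of AudioEffect.queryEffects() cached by getAvailableEffects()):

#include <algorithm>
#include <string>
#include <vector>

struct EffectDescriptor {
  std::string type_uuid;  // stand-in for android.media.audiofx UUIDs
};

// Enumerate once, cache, then answer availability with a linear scan.
const std::vector<EffectDescriptor>& AvailableEffects() {
  static const std::vector<EffectDescriptor> effects = {
      {"aec-uuid"}, {"ns-uuid"}};  // placeholder entries
  return effects;
}

bool IsEffectTypeAvailable(const std::string& type_uuid) {
  const auto& effects = AvailableEffects();
  return std::any_of(effects.begin(), effects.end(),
                     [&](const EffectDescriptor& d) {
                       return d.type_uuid == type_uuid;
                     });
}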
private static boolean isEffectTypeAvailable(UUID effectType) { Descriptor[] effects = getAvailableEffects(); diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java index 2783a0d044..b057c3a454 100644 --- a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java +++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java @@ -17,7 +17,7 @@ import android.media.AudioRecord; import android.media.AudioTrack; import android.os.Build; -import android.support.annotation.Nullable; +import androidx.annotation.Nullable; import java.util.Timer; import java.util.TimerTask; import org.webrtc.ContextUtils; diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java index c712a32c90..8eab01cd69 100644 --- a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java +++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java @@ -15,7 +15,7 @@ import android.media.MediaRecorder.AudioSource; import android.os.Build; import android.os.Process; -import android.support.annotation.Nullable; +import androidx.annotation.Nullable; import java.lang.System; import java.nio.ByteBuffer; import java.util.Arrays; @@ -366,7 +366,7 @@ private static int getDefaultAudioSource() { return AudioSource.VOICE_COMMUNICATION; } - // Sets all recorded samples to zero if |mute| is true, i.e., ensures that + // Sets all recorded samples to zero if `mute` is true, i.e., ensures that // the microphone is muted. public static void setMicrophoneMute(boolean mute) { Logging.w(TAG, "setMicrophoneMute(" + mute + ")"); diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java index 7e6ad5acf4..e3988e1a36 100644 --- a/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java +++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java @@ -18,7 +18,7 @@ import android.media.AudioTrack; import android.os.Build; import android.os.Process; -import android.support.annotation.Nullable; +import androidx.annotation.Nullable; import java.lang.Thread; import java.nio.ByteBuffer; import org.webrtc.ContextUtils; @@ -78,7 +78,7 @@ private static int getDefaultUsageAttribute() { private @Nullable AudioTrack audioTrack; private @Nullable AudioTrackThread audioThread; - // Samples to be played are replaced by zeros if |speakerMute| is set to true. + // Samples to be played are replaced by zeros if `speakerMute` is set to true. // Can be used to ensure that the speaker is fully muted. private static volatile boolean speakerMute; private byte[] emptyBytes; @@ -239,9 +239,9 @@ private int initPlayout(int sampleRate, int channels, double bufferSizeFactor) { Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes); // For the streaming mode, data must be written to the audio sink in // chunks of size (given by byteBuffer.capacity()) less than or equal - // to the total buffer size |minBufferSizeInBytes|. But, we have seen + // to the total buffer size `minBufferSizeInBytes`. But, we have seen // reports of "getMinBufferSize(): error querying hardware". 
Hence, it - can happen that |minBufferSizeInBytes| contains an invalid value. + can happen that `minBufferSizeInBytes` contains an invalid value. if (minBufferSizeInBytes < byteBuffer.capacity()) { reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value."); return -1; } @@ -481,7 +481,7 @@ private int channelCountToConfiguration(int channels) { private native void nativeGetPlayoutData(int bytes, long nativeAudioRecord); - // Sets all samples to be played out to zero if |mute| is true, i.e., + // Sets all samples to be played out to zero if `mute` is true, i.e., // ensures that the speaker is muted. public static void setSpeakerMute(boolean mute) { Logging.w(TAG, "setSpeakerMute(" + mute + ")"); diff --git a/modules/audio_device/android/opensles_player.h b/modules/audio_device/android/opensles_player.h index 78af29b6b6..41593a448f 100644 --- a/modules/audio_device/android/opensles_player.h +++ b/modules/audio_device/android/opensles_player.h @@ -86,7 +86,7 @@ class OpenSLESPlayer { // Reads audio data in PCM format using the AudioDeviceBuffer. // Can be called both on the main thread (during Start()) and from the // internal audio thread while output streaming is active. - // If the |silence| flag is set, the audio is filled with zeros instead of + // If the `silence` flag is set, the audio is filled with zeros instead of // asking the WebRTC layer for real audio data. This procedure is also known // as audio priming. void EnqueuePlayoutData(bool silence); @@ -97,7 +97,7 @@ class OpenSLESPlayer { // Obtains the SL Engine Interface from the existing global Engine object. // The interface exposes creation methods of all the OpenSL ES object types. - // This method defines the |engine_| member variable. + // This method defines the `engine_` member variable. bool ObtainEngineInterface(); // Creates/destroys the output mix object. diff --git a/modules/audio_device/android/opensles_recorder.h b/modules/audio_device/android/opensles_recorder.h index 5f975d7242..e659c3c157 100644 --- a/modules/audio_device/android/opensles_recorder.h +++ b/modules/audio_device/android/opensles_recorder.h @@ -83,7 +83,7 @@ class OpenSLESRecorder { private: // Obtains the SL Engine Interface from the existing global Engine object. // The interface exposes creation methods of all the OpenSL ES object types. - // This method defines the |engine_| member variable. + // This method defines the `engine_` member variable. bool ObtainEngineInterface(); // Creates/destroys the audio recorder and the simple-buffer queue object. @@ -104,7 +104,7 @@ class OpenSLESRecorder { // Wraps calls to SLAndroidSimpleBufferQueueState::Enqueue() and it can be // called both on the main thread (but before recording has started) and from // the internal audio thread while input streaming is active. It uses - // |simple_buffer_queue_| but no lock is needed since the initial calls from + // `simple_buffer_queue_` but no lock is needed since the initial calls from // the main thread and the native callback thread are mutually exclusive. bool EnqueueAudioBuffer();
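To make the "audio priming" mentioned above concrete, here is a minimal C++ sketch; the Player type and the slot count are illustrative assumptions, and only the EnqueuePlayoutData(bool) entry point mirrors the interface described in the header. Every slot in the device buffer queue is filled with zeros before streaming starts, so the first device callbacks always have something to play:

  // Sketch only: a stand-in for OpenSLESPlayer that just mirrors the
  // EnqueuePlayoutData(bool silence) entry point described above.
  struct Player {
    void EnqueuePlayoutData(bool silence) { (void)silence; /* enqueue one buffer */ }
  };

  // Prime every queue slot with silence before Start(); afterwards the
  // device callback refills slots with real audio (silence == false).
  void PrimeWithSilence(Player& player, int num_queue_slots) {
    for (int i = 0; i < num_queue_slots; ++i)
      player.EnqueuePlayoutData(/*silence=*/true);
  }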
diff --git a/modules/audio_device/audio_device_buffer.cc b/modules/audio_device/audio_device_buffer.cc index 977045419a..873e5d6088 100644 --- a/modules/audio_device/audio_device_buffer.cc +++ b/modules/audio_device/audio_device_buffer.cc @@ -54,6 +54,7 @@ AudioDeviceBuffer::AudioDeviceBuffer(TaskQueueFactory* task_queue_factory) typing_status_(false), play_delay_ms_(0), rec_delay_ms_(0), + capture_timestamp_ns_(0), num_stat_reports_(0), last_timer_task_time_(0), rec_stat_count_(0), @@ -61,10 +62,10 @@ AudioDeviceBuffer::AudioDeviceBuffer(TaskQueueFactory* task_queue_factory) play_start_time_(0), only_silence_recorded_(true), log_stats_(false) { - RTC_LOG(INFO) << "AudioDeviceBuffer::ctor"; + RTC_LOG(LS_INFO) << "AudioDeviceBuffer::ctor"; #ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE phase_ = 0.0; - RTC_LOG(WARNING) << "AUDIO_DEVICE_PLAYS_SINUS_TONE is defined!"; + RTC_LOG(LS_WARNING) << "AUDIO_DEVICE_PLAYS_SINUS_TONE is defined!"; #endif } @@ -72,13 +73,13 @@ AudioDeviceBuffer::~AudioDeviceBuffer() { RTC_DCHECK_RUN_ON(&main_thread_checker_); RTC_DCHECK(!playing_); RTC_DCHECK(!recording_); - RTC_LOG(INFO) << "AudioDeviceBuffer::~dtor"; + RTC_LOG(LS_INFO) << "AudioDeviceBuffer::~dtor"; } int32_t AudioDeviceBuffer::RegisterAudioCallback( AudioTransport* audio_callback) { RTC_DCHECK_RUN_ON(&main_thread_checker_); - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (playing_ || recording_) { RTC_LOG(LS_ERROR) << "Failed to set audio transport since media was active"; return -1; @@ -95,7 +96,7 @@ void AudioDeviceBuffer::StartPlayout() { if (playing_) { return; } - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Clear members tracking playout stats and do it on the task queue. task_queue_.PostTask([this] { ResetPlayStats(); }); // Start a periodic timer based on task queue if not already done by the @@ -114,7 +115,7 @@ void AudioDeviceBuffer::StartRecording() { if (recording_) { return; } - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Clear members tracking recording stats and do it on the task queue. task_queue_.PostTask([this] { ResetRecStats(); }); // Start a periodic timer based on task queue if not already done by the @@ -136,13 +137,14 @@ void AudioDeviceBuffer::StopPlayout() { if (!playing_) { return; } - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; playing_ = false; // Stop periodic logging if no more media is active. if (!recording_) { StopPeriodicLogging(); } - RTC_LOG(INFO) << "total playout time: " << rtc::TimeSince(play_start_time_); + RTC_LOG(LS_INFO) << "total playout time: " + << rtc::TimeSince(play_start_time_); } void AudioDeviceBuffer::StopRecording() { @@ -150,7 +152,7 @@ void AudioDeviceBuffer::StopRecording() { if (!recording_) { return; } - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; recording_ = false; // Stop periodic logging if no more media is active. if (!playing_) { @@ -160,30 +162,30 @@ // recorded. Measurements (max of absolute level) are taken twice per second, // which means that if e.g. 10 seconds of audio has been recorded, a total of // 20 level estimates must all be identical to zero to trigger the histogram.
- // |only_silence_recorded_| can only be cleared on the native audio thread + // `only_silence_recorded_` can only be cleared on the native audio thread // that drives audio capture but we know by design that the audio has stopped // when this method is called, hence there should not be any conflicts. Also, - // the fact that |only_silence_recorded_| can be affected during the complete + // the fact that `only_silence_recorded_` can be affected during the complete // call makes chances of conflicts with potentially one last callback very // small. const size_t time_since_start = rtc::TimeSince(rec_start_time_); if (time_since_start > kMinValidCallTimeTimeInMilliseconds) { const int only_zeros = static_cast<int>(only_silence_recorded_); RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.RecordedOnlyZeros", only_zeros); - RTC_LOG(INFO) << "HISTOGRAM(WebRTC.Audio.RecordedOnlyZeros): " - << only_zeros; + RTC_LOG(LS_INFO) << "HISTOGRAM(WebRTC.Audio.RecordedOnlyZeros): " + << only_zeros; } - RTC_LOG(INFO) << "total recording time: " << time_since_start; + RTC_LOG(LS_INFO) << "total recording time: " << time_since_start; } int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) { - RTC_LOG(INFO) << "SetRecordingSampleRate(" << fsHz << ")"; + RTC_LOG(LS_INFO) << "SetRecordingSampleRate(" << fsHz << ")"; rec_sample_rate_ = fsHz; return 0; } int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) { - RTC_LOG(INFO) << "SetPlayoutSampleRate(" << fsHz << ")"; + RTC_LOG(LS_INFO) << "SetPlayoutSampleRate(" << fsHz << ")"; play_sample_rate_ = fsHz; return 0; } @@ -197,13 +199,13 @@ uint32_t AudioDeviceBuffer::PlayoutSampleRate() const { } int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) { - RTC_LOG(INFO) << "SetRecordingChannels(" << channels << ")"; + RTC_LOG(LS_INFO) << "SetRecordingChannels(" << channels << ")"; rec_channels_ = channels; return 0; } int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) { - RTC_LOG(INFO) << "SetPlayoutChannels(" << channels << ")"; + RTC_LOG(LS_INFO) << "SetPlayoutChannels(" << channels << ")"; play_channels_ = channels; return 0; } @@ -228,6 +230,12 @@ void AudioDeviceBuffer::SetVQEData(int play_delay_ms, int rec_delay_ms) { int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer, size_t samples_per_channel) { + return SetRecordedBuffer(audio_buffer, samples_per_channel, 0); +} + +int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer, + size_t samples_per_channel, + int64_t capture_timestamp_ns) { // Copy the complete input buffer to the local buffer. const size_t old_size = rec_buffer_.size(); rec_buffer_.SetData(static_cast<const int16_t*>(audio_buffer), @@ -238,6 +246,17 @@ int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer, RTC_LOG(LS_INFO) << "Size of recording buffer: " << rec_buffer_.size(); } + // If the timestamp is less than or equal to zero, it is not valid and is + // ignored. If we did timestamp alignment on such a value, it might + // accidentally become greater than zero and would then be treated as if it + // were a correct timestamp. + capture_timestamp_ns_ = + (capture_timestamp_ns > 0) + ? rtc::kNumNanosecsPerMicrosec * + timestamp_aligner_.TranslateTimestamp( + capture_timestamp_ns / rtc::kNumNanosecsPerMicrosec, + rtc::TimeMicros()) + : capture_timestamp_ns;
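In isolation, the translation step above amounts to the following sketch; the function name is illustrative, while the rtc:: helpers are the ones used in the patch. Note that it is the incoming parameter, not the previously stored member, that gets translated:

  #include "rtc_base/time_utils.h"
  #include "rtc_base/timestamp_aligner.h"

  // Maps a capture timestamp from the audio device clock onto the
  // rtc::TimeMicros() clock, expressed in nanoseconds. Non-positive inputs
  // are passed through unchanged so they keep signalling "invalid".
  int64_t AlignCaptureTimestampNs(rtc::TimestampAligner& aligner,
                                  int64_t capture_timestamp_ns) {
    if (capture_timestamp_ns <= 0)
      return capture_timestamp_ns;
    return rtc::kNumNanosecsPerMicrosec *
           aligner.TranslateTimestamp(
               capture_timestamp_ns / rtc::kNumNanosecsPerMicrosec,
               rtc::TimeMicros());
  }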
// Derive a new level value twice per second and check if it is non-zero. int16_t max_abs = 0; RTC_DCHECK_LT(rec_stat_count_, 50); @@ -245,7 +264,7 @@ int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer, // Returns the largest absolute value in a signed 16-bit vector. max_abs = WebRtcSpl_MaxAbsValueW16(rec_buffer_.data(), rec_buffer_.size()); rec_stat_count_ = 0; - // Set |only_silence_recorded_| to false as soon as at least one detection + // Set `only_silence_recorded_` to false as soon as at least one detection // of a non-zero audio packet is found. It can only be restored to true // again by restarting the call. if (max_abs > 0) { @@ -270,7 +289,7 @@ int32_t AudioDeviceBuffer::DeliverRecordedData() { int32_t res = audio_transport_cb_->RecordedDataIsAvailable( rec_buffer_.data(), frames, bytes_per_frame, rec_channels_, rec_sample_rate_, total_delay_ms, 0, 0, typing_status_, - new_mic_level_dummy); + new_mic_level_dummy, capture_timestamp_ns_); if (res == -1) { RTC_LOG(LS_ERROR) << "RecordedDataIsAvailable() failed"; } @@ -408,21 +427,21 @@ void AudioDeviceBuffer::LogStats(LogState state) { ((100.0f * std::abs(rate - rec_sample_rate)) / rec_sample_rate)); RTC_HISTOGRAM_PERCENTAGE("WebRTC.Audio.RecordSampleRateOffsetInPercent", abs_diff_rate_in_percent); - RTC_LOG(INFO) << "[REC : " << time_since_last << "msec, " - << rec_sample_rate / 1000 << "kHz] callbacks: " - << stats.rec_callbacks - last_stats_.rec_callbacks - << ", " "samples: " - << diff_samples - << ", " "rate: " - << static_cast<int>(rate + 0.5) - << ", " "rate diff: " - << abs_diff_rate_in_percent - << "%, " "level: " - << stats.max_rec_level; + RTC_LOG(LS_INFO) << "[REC : " << time_since_last << "msec, " + << rec_sample_rate / 1000 << "kHz] callbacks: " + << stats.rec_callbacks - last_stats_.rec_callbacks + << ", " + "samples: " + << diff_samples + << ", " + "rate: " + << static_cast<int>(rate + 0.5) + << ", " + "rate diff: " + << abs_diff_rate_in_percent + << "%, " + "level: " + << stats.max_rec_level; } diff_samples = stats.play_samples - last_stats_.play_samples; @@ -434,21 +453,21 @@ void AudioDeviceBuffer::LogStats(LogState state) { ((100.0f * std::abs(rate - play_sample_rate)) / play_sample_rate)); RTC_HISTOGRAM_PERCENTAGE("WebRTC.Audio.PlayoutSampleRateOffsetInPercent", abs_diff_rate_in_percent); - RTC_LOG(INFO) << "[PLAY: " << time_since_last << "msec, " - << play_sample_rate / 1000 << "kHz] callbacks: " - << stats.play_callbacks - last_stats_.play_callbacks - << ", " "samples: " - << diff_samples - << ", " "rate: " - << static_cast<int>(rate + 0.5) - << ", " "rate diff: " - << abs_diff_rate_in_percent - << "%, " "level: " - << stats.max_play_level; + RTC_LOG(LS_INFO) << "[PLAY: " << time_since_last << "msec, " + << play_sample_rate / 1000 << "kHz] callbacks: " + << stats.play_callbacks - last_stats_.play_callbacks + << ", " + "samples: " + << diff_samples + << ", " + "rate: " + << static_cast<int>(rate + 0.5) + << ", " + "rate diff: " + << abs_diff_rate_in_percent + << "%, " + "level: " + << stats.max_play_level; } } last_stats_ = stats; diff --git a/modules/audio_device/audio_device_buffer.h b/modules/audio_device/audio_device_buffer.h index a0b7953194..ea6ab9a93e 100644 --- a/modules/audio_device/audio_device_buffer.h +++ b/modules/audio_device/audio_device_buffer.h @@ -23,6 +23,7 @@ #include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" #include "rtc_base/thread_annotations.h" +#include "rtc_base/timestamp_aligner.h" namespace webrtc { @@ -97,8 +98,13 @@ class AudioDeviceBuffer { size_t RecordingChannels() const; size_t PlayoutChannels() const; + //
TODO(bugs.webrtc.org/13621): Deprecate this function. virtual int32_t SetRecordedBuffer(const void* audio_buffer, size_t samples_per_channel); + + virtual int32_t SetRecordedBuffer(const void* audio_buffer, + size_t samples_per_channel, + int64_t capture_timestamp_ns); virtual void SetVQEData(int play_delay_ms, int rec_delay_ms); virtual int32_t DeliverRecordedData(); uint32_t NewMicLevel() const; @@ -187,6 +193,9 @@ class AudioDeviceBuffer { int play_delay_ms_; int rec_delay_ms_; + // Capture timestamp. + int64_t capture_timestamp_ns_; + // Counts number of times LogStats() has been called. size_t num_stat_reports_ RTC_GUARDED_BY(task_queue_); @@ -219,6 +228,10 @@ // being printed in the LogStats() task. bool log_stats_ RTC_GUARDED_BY(task_queue_); + // Used for converting capture timestamps (received from AudioRecordThread + // via AudioRecordJni::DataIsRecorded) to the RTC clock. + rtc::TimestampAligner timestamp_aligner_; + // Should *never* be defined in production builds. Only used for testing. // When defined, the output signal will be replaced by a sinus tone at 440Hz. #ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE diff --git a/modules/audio_device/audio_device_data_observer.cc b/modules/audio_device/audio_device_data_observer.cc index be78fd16d7..e54494c285 100644 --- a/modules/audio_device/audio_device_data_observer.cc +++ b/modules/audio_device/audio_device_data_observer.cc @@ -45,17 +45,34 @@ class ADMWrapper : public AudioDeviceModule, public AudioTransport { // Make sure we have a valid ADM before returning it to the user. bool IsValid() { return is_valid_; } - // AudioTransport methods overrides. int32_t RecordedDataIsAvailable(const void* audioSamples, - const size_t nSamples, - const size_t nBytesPerSample, - const size_t nChannels, - const uint32_t samples_per_sec, - const uint32_t total_delay_ms, - const int32_t clockDrift, - const uint32_t currentMicLevel, - const bool keyPressed, + size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samples_per_sec, + uint32_t total_delay_ms, + int32_t clockDrift, + uint32_t currentMicLevel, + bool keyPressed, uint32_t& newMicLevel) override { + return RecordedDataIsAvailable(audioSamples, nSamples, nBytesPerSample, + nChannels, samples_per_sec, total_delay_ms, + clockDrift, currentMicLevel, keyPressed, + newMicLevel, /*capture_timestamp_ns*/ 0); + }
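Once an aligned timestamp reaches a sink through the extended callback, it lives on the same clock as rtc::TimeNanos(), so capture-to-delivery latency can be derived directly. A minimal sketch (the free function is illustrative and not part of the patch or of webrtc::AudioTransport):

  #include <cstdint>
  #include "rtc_base/time_utils.h"

  // Sketch: relate an aligned capture timestamp to "now" in the RTC clock.
  // Returns a negative value when the timestamp was flagged as invalid.
  int64_t CaptureToDeliveryNs(int64_t capture_timestamp_ns) {
    if (capture_timestamp_ns <= 0)
      return -1;  // 0 means "no capture time available"; see above.
    return rtc::TimeNanos() - capture_timestamp_ns;
  }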
+ + // AudioTransport methods overrides. + int32_t RecordedDataIsAvailable(const void* audioSamples, + size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samples_per_sec, + uint32_t total_delay_ms, + int32_t clockDrift, + uint32_t currentMicLevel, + bool keyPressed, + uint32_t& newMicLevel, + int64_t capture_timestamp_ns) override { int32_t res = 0; // Capture PCM data of locally captured audio. if (observer_) { @@ -67,7 +84,8 @@ class ADMWrapper : public AudioDeviceModule, public AudioTransport { if (audio_transport_) { res = audio_transport_->RecordedDataIsAvailable( audioSamples, nSamples, nBytesPerSample, nChannels, samples_per_sec, - total_delay_ms, clockDrift, currentMicLevel, keyPressed, newMicLevel); + total_delay_ms, clockDrift, currentMicLevel, keyPressed, newMicLevel, + capture_timestamp_ns); } return res; @@ -110,7 +128,7 @@ class ADMWrapper : public AudioDeviceModule, public AudioTransport { void* audio_data, int64_t* elapsed_time_ms, int64_t* ntp_time_ms) override { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } // Override AudioDeviceModule's RegisterAudioCallback method to remember the diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc index 84460ff83f..01a8a25c06 100644 --- a/modules/audio_device/audio_device_impl.cc +++ b/modules/audio_device/audio_device_impl.cc @@ -73,7 +73,7 @@ namespace webrtc { rtc::scoped_refptr<AudioDeviceModule> AudioDeviceModule::Create( AudioLayer audio_layer, TaskQueueFactory* task_queue_factory) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory); } @@ -81,7 +81,7 @@ rtc::scoped_refptr<AudioDeviceModule> AudioDeviceModule::Create( rtc::scoped_refptr<AudioDeviceModule> AudioDeviceModule::CreateForTest( AudioLayer audio_layer, TaskQueueFactory* task_queue_factory) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // The "AudioDeviceModule::kWindowsCoreAudio2" audio layer has its own // dedicated factory method which should be used instead. @@ -118,31 +118,31 @@ AudioDeviceModuleImpl::AudioDeviceModuleImpl( AudioLayer audio_layer, TaskQueueFactory* task_queue_factory) : audio_layer_(audio_layer), audio_device_buffer_(task_queue_factory) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; } int32_t AudioDeviceModuleImpl::CheckPlatform() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Ensure that the current platform is supported PlatformType platform(kPlatformNotSupported); #if defined(_WIN32) platform = kPlatformWin32; - RTC_LOG(INFO) << "current platform is Win32"; + RTC_LOG(LS_INFO) << "current platform is Win32"; #elif defined(WEBRTC_ANDROID) platform = kPlatformAndroid; - RTC_LOG(INFO) << "current platform is Android"; + RTC_LOG(LS_INFO) << "current platform is Android"; #elif defined(WEBRTC_LINUX) platform = kPlatformLinux; - RTC_LOG(INFO) << "current platform is Linux"; + RTC_LOG(LS_INFO) << "current platform is Linux"; #elif defined(WEBRTC_IOS) platform = kPlatformIOS; - RTC_LOG(INFO) << "current platform is IOS"; + RTC_LOG(LS_INFO) << "current platform is IOS"; #elif defined(WEBRTC_MAC) platform = kPlatformMac; - RTC_LOG(INFO) << "current platform is Mac"; + RTC_LOG(LS_INFO) << "current platform is Mac"; #endif if (platform == kPlatformNotSupported) { - RTC_LOG(LERROR) + RTC_LOG(LS_ERROR) << "current platform is not supported => this module will self " "destruct!"; return -1; @@ -152,19 +152,19 @@ int32_t AudioDeviceModuleImpl::CheckPlatform() { } int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; // Dummy ADM implementations if build flags are set.
#if defined(WEBRTC_DUMMY_AUDIO_BUILD) audio_device_.reset(new AudioDeviceDummy()); - RTC_LOG(INFO) << "Dummy Audio APIs will be utilized"; + RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized"; #elif defined(WEBRTC_DUMMY_FILE_DEVICES) audio_device_.reset(FileAudioDeviceFactory::CreateFileAudioDevice()); if (audio_device_) { - RTC_LOG(INFO) << "Will use file-playing dummy device."; + RTC_LOG(LS_INFO) << "Will use file-playing dummy device."; } else { // Create a dummy device instead. audio_device_.reset(new AudioDeviceDummy()); - RTC_LOG(INFO) << "Dummy Audio APIs will be utilized"; + RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized"; } // Real (non-dummy) ADM implementations. @@ -174,10 +174,10 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { #if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD) if ((audio_layer == kWindowsCoreAudio) || (audio_layer == kPlatformDefaultAudio)) { - RTC_LOG(INFO) << "Attempting to use the Windows Core Audio APIs..."; + RTC_LOG(LS_INFO) << "Attempting to use the Windows Core Audio APIs..."; if (AudioDeviceWindowsCore::CoreAudioIsSupported()) { audio_device_.reset(new AudioDeviceWindowsCore()); - RTC_LOG(INFO) << "Windows Core Audio APIs will be utilized"; + RTC_LOG(LS_INFO) << "Windows Core Audio APIs will be utilized"; } } #endif // defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD) @@ -251,11 +251,11 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { // - kPlatformDefaultAudio => ALSA, and // - kLinuxAlsaAudio => ALSA, and // - kLinuxPulseAudio => Invalid selection. - RTC_LOG(WARNING) << "PulseAudio is disabled using build flag."; + RTC_LOG(LS_WARNING) << "PulseAudio is disabled using build flag."; if ((audio_layer == kLinuxAlsaAudio) || (audio_layer == kPlatformDefaultAudio)) { audio_device_.reset(new AudioDeviceLinuxALSA()); - RTC_LOG(INFO) << "Linux ALSA APIs will be utilized."; + RTC_LOG(LS_INFO) << "Linux ALSA APIs will be utilized."; } #else // Build flag 'rtc_include_pulse_audio' is set to true (default). In this @@ -263,15 +263,15 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { // - kPlatformDefaultAudio => PulseAudio, and // - kLinuxPulseAudio => PulseAudio, and // - kLinuxAlsaAudio => ALSA (supported but not default). - RTC_LOG(INFO) << "PulseAudio support is enabled."; + RTC_LOG(LS_INFO) << "PulseAudio support is enabled."; if ((audio_layer == kLinuxPulseAudio) || (audio_layer == kPlatformDefaultAudio)) { // Linux PulseAudio implementation is default. 
audio_device_.reset(new AudioDeviceLinuxPulse()); - RTC_LOG(INFO) << "Linux PulseAudio APIs will be utilized"; + RTC_LOG(LS_INFO) << "Linux PulseAudio APIs will be utilized"; } else if (audio_layer == kLinuxAlsaAudio) { audio_device_.reset(new AudioDeviceLinuxALSA()); - RTC_LOG(WARNING) << "Linux ALSA APIs will be utilized."; + RTC_LOG(LS_WARNING) << "Linux ALSA APIs will be utilized."; } #endif // #if !defined(WEBRTC_ENABLE_LINUX_PULSE) #endif // #if defined(WEBRTC_LINUX) @@ -281,7 +281,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { if (audio_layer == kPlatformDefaultAudio) { audio_device_.reset( new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false)); - RTC_LOG(INFO) << "iPhone Audio APIs will be utilized."; + RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized."; } // END #if defined(WEBRTC_IOS) @@ -289,14 +289,14 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { #elif defined(WEBRTC_MAC) if (audio_layer == kPlatformDefaultAudio) { audio_device_.reset(new AudioDeviceMac()); - RTC_LOG(INFO) << "Mac OS X Audio APIs will be utilized."; + RTC_LOG(LS_INFO) << "Mac OS X Audio APIs will be utilized."; } #endif // WEBRTC_MAC // Dummy ADM implementation. if (audio_layer == kDummyAudio) { audio_device_.reset(new AudioDeviceDummy()); - RTC_LOG(INFO) << "Dummy Audio APIs will be utilized."; + RTC_LOG(LS_INFO) << "Dummy Audio APIs will be utilized."; } #endif // if defined(WEBRTC_DUMMY_AUDIO_BUILD) @@ -309,17 +309,17 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() { } int32_t AudioDeviceModuleImpl::AttachAudioBuffer() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; audio_device_->AttachAudioBuffer(&audio_device_buffer_); return 0; } AudioDeviceModuleImpl::~AudioDeviceModuleImpl() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; } int32_t AudioDeviceModuleImpl::ActiveAudioLayer(AudioLayer* audioLayer) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; AudioLayer activeAudio; if (audio_device_->ActiveAudioLayer(activeAudio) == -1) { return -1; @@ -329,7 +329,7 @@ int32_t AudioDeviceModuleImpl::ActiveAudioLayer(AudioLayer* audioLayer) const { } int32_t AudioDeviceModuleImpl::Init() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; if (initialized_) return 0; RTC_CHECK(audio_device_); @@ -346,7 +346,7 @@ int32_t AudioDeviceModuleImpl::Init() { } int32_t AudioDeviceModuleImpl::Terminate() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; if (!initialized_) return 0; if (audio_device_->Terminate() == -1) { @@ -357,65 +357,65 @@ int32_t AudioDeviceModuleImpl::Terminate() { } bool AudioDeviceModuleImpl::Initialized() const { - RTC_LOG(INFO) << __FUNCTION__ << ": " << initialized_; + RTC_LOG(LS_INFO) << __FUNCTION__ << ": " << initialized_; return initialized_; } int32_t AudioDeviceModuleImpl::InitSpeaker() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->InitSpeaker(); } int32_t AudioDeviceModuleImpl::InitMicrophone() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->InitMicrophone(); } int32_t AudioDeviceModuleImpl::SpeakerVolumeIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << 
"output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetSpeakerVolume(uint32_t volume) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")"; CHECKinitialized_(); return audio_device_->SetSpeakerVolume(volume); } int32_t AudioDeviceModuleImpl::SpeakerVolume(uint32_t* volume) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint32_t level = 0; if (audio_device_->SpeakerVolume(level) == -1) { return -1; } *volume = level; - RTC_LOG(INFO) << "output: " << *volume; + RTC_LOG(LS_INFO) << "output: " << *volume; return 0; } bool AudioDeviceModuleImpl::SpeakerIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isInitialized = audio_device_->SpeakerIsInitialized(); - RTC_LOG(INFO) << "output: " << isInitialized; + RTC_LOG(LS_INFO) << "output: " << isInitialized; return isInitialized; } bool AudioDeviceModuleImpl::MicrophoneIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isInitialized = audio_device_->MicrophoneIsInitialized(); - RTC_LOG(INFO) << "output: " << isInitialized; + RTC_LOG(LS_INFO) << "output: " << isInitialized; return isInitialized; } @@ -440,119 +440,119 @@ int32_t AudioDeviceModuleImpl::MinSpeakerVolume(uint32_t* minVolume) const { } int32_t AudioDeviceModuleImpl::SpeakerMuteIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetSpeakerMute(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); return audio_device_->SetSpeakerMute(enable); } int32_t AudioDeviceModuleImpl::SpeakerMute(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool muted = false; if (audio_device_->SpeakerMute(muted) == -1) { return -1; } *enabled = muted; - RTC_LOG(INFO) << "output: " << muted; + RTC_LOG(LS_INFO) << "output: " << muted; return 0; } int32_t AudioDeviceModuleImpl::MicrophoneMuteIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetMicrophoneMute(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); return (audio_device_->SetMicrophoneMute(enable)); } int32_t AudioDeviceModuleImpl::MicrophoneMute(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool muted = false; if (audio_device_->MicrophoneMute(muted) == -1) { return -1; } *enabled = muted; - RTC_LOG(INFO) << "output: " << muted; + RTC_LOG(LS_INFO) << "output: " << muted; return 0; } int32_t 
AudioDeviceModuleImpl::MicrophoneVolumeIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetMicrophoneVolume(uint32_t volume) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << volume << ")"; CHECKinitialized_(); return (audio_device_->SetMicrophoneVolume(volume)); } int32_t AudioDeviceModuleImpl::MicrophoneVolume(uint32_t* volume) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint32_t level = 0; if (audio_device_->MicrophoneVolume(level) == -1) { return -1; } *volume = level; - RTC_LOG(INFO) << "output: " << *volume; + RTC_LOG(LS_INFO) << "output: " << *volume; return 0; } int32_t AudioDeviceModuleImpl::StereoRecordingIsAvailable( bool* available) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetStereoRecording(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); if (audio_device_->RecordingIsInitialized()) { - RTC_LOG(LERROR) + RTC_LOG(LS_ERROR) << "unable to set stereo mode after recording is initialized"; return -1; } if (audio_device_->SetStereoRecording(enable) == -1) { if (enable) { - RTC_LOG(WARNING) << "failed to enable stereo recording"; + RTC_LOG(LS_WARNING) << "failed to enable stereo recording"; } return -1; } @@ -565,39 +565,39 @@ int32_t AudioDeviceModuleImpl::SetStereoRecording(bool enable) { } int32_t AudioDeviceModuleImpl::StereoRecording(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool stereo = false; if (audio_device_->StereoRecording(stereo) == -1) { return -1; } *enabled = stereo; - RTC_LOG(INFO) << "output: " << stereo; + RTC_LOG(LS_INFO) << "output: " << stereo; return 0; } int32_t AudioDeviceModuleImpl::StereoPlayoutIsAvailable(bool* available) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::SetStereoPlayout(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); if (audio_device_->PlayoutIsInitialized()) { - RTC_LOG(LERROR) + RTC_LOG(LS_ERROR) << "unable to set stereo mode while playing side is initialized"; return -1; } if (audio_device_->SetStereoPlayout(enable)) { - RTC_LOG(WARNING) << "stereo playout is not supported"; + RTC_LOG(LS_WARNING) << "stereo playout is not supported"; return -1; } int8_t nChannels(1); @@ -609,38 +609,38 @@ int32_t AudioDeviceModuleImpl::SetStereoPlayout(bool enable) { } int32_t 
AudioDeviceModuleImpl::StereoPlayout(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool stereo = false; if (audio_device_->StereoPlayout(stereo) == -1) { return -1; } *enabled = stereo; - RTC_LOG(INFO) << "output: " << stereo; + RTC_LOG(LS_INFO) << "output: " << stereo; return 0; } int32_t AudioDeviceModuleImpl::PlayoutIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->PlayoutIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleImpl::RecordingIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->RecordingIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return 0; } @@ -665,21 +665,21 @@ int32_t AudioDeviceModuleImpl::MinMicrophoneVolume(uint32_t* minVolume) const { } int16_t AudioDeviceModuleImpl::PlayoutDevices() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint16_t nPlayoutDevices = audio_device_->PlayoutDevices(); - RTC_LOG(INFO) << "output: " << nPlayoutDevices; + RTC_LOG(LS_INFO) << "output: " << nPlayoutDevices; return (int16_t)(nPlayoutDevices); } int32_t AudioDeviceModuleImpl::SetPlayoutDevice(uint16_t index) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ")"; CHECKinitialized_(); return audio_device_->SetPlayoutDevice(index); } int32_t AudioDeviceModuleImpl::SetPlayoutDevice(WindowsDeviceType device) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->SetPlayoutDevice(device); } @@ -688,7 +688,7 @@ int32_t AudioDeviceModuleImpl::PlayoutDeviceName( uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ", ...)"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)"; CHECKinitialized_(); if (name == NULL) { return -1; @@ -697,10 +697,10 @@ int32_t AudioDeviceModuleImpl::PlayoutDeviceName( return -1; } if (name != NULL) { - RTC_LOG(INFO) << "output: name = " << name; + RTC_LOG(LS_INFO) << "output: name = " << name; } if (guid != NULL) { - RTC_LOG(INFO) << "output: guid = " << guid; + RTC_LOG(LS_INFO) << "output: guid = " << guid; } return 0; } @@ -709,7 +709,7 @@ int32_t AudioDeviceModuleImpl::RecordingDeviceName( uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ", ...)"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ", ...)"; CHECKinitialized_(); if (name == NULL) { return -1; @@ -718,137 +718,137 @@ int32_t AudioDeviceModuleImpl::RecordingDeviceName( return -1; } if (name != NULL) { - RTC_LOG(INFO) << "output: name = " << name; + RTC_LOG(LS_INFO) << "output: name = " << name; } if (guid != NULL) { - RTC_LOG(INFO) << "output: guid = " << guid; + RTC_LOG(LS_INFO) << "output: guid = " << guid; } return 0; } int16_t AudioDeviceModuleImpl::RecordingDevices() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); uint16_t nRecordingDevices = 
audio_device_->RecordingDevices(); - RTC_LOG(INFO) << "output: " << nRecordingDevices; + RTC_LOG(LS_INFO) << "output: " << nRecordingDevices; return (int16_t)nRecordingDevices; } int32_t AudioDeviceModuleImpl::SetRecordingDevice(uint16_t index) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << index << ")"; CHECKinitialized_(); return audio_device_->SetRecordingDevice(index); } int32_t AudioDeviceModuleImpl::SetRecordingDevice(WindowsDeviceType device) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->SetRecordingDevice(device); } int32_t AudioDeviceModuleImpl::InitPlayout() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (PlayoutIsInitialized()) { return 0; } int32_t result = audio_device_->InitPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess", static_cast<int>(result == 0)); return result; } int32_t AudioDeviceModuleImpl::InitRecording() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (RecordingIsInitialized()) { return 0; } int32_t result = audio_device_->InitRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess", static_cast<int>(result == 0)); return result; } bool AudioDeviceModuleImpl::PlayoutIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->PlayoutIsInitialized(); } bool AudioDeviceModuleImpl::RecordingIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->RecordingIsInitialized(); } int32_t AudioDeviceModuleImpl::StartPlayout() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (Playing()) { return 0; } audio_device_buffer_.StartPlayout(); int32_t result = audio_device_->StartPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess", static_cast<int>(result == 0)); return result; } int32_t AudioDeviceModuleImpl::StopPlayout() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); int32_t result = audio_device_->StopPlayout(); audio_device_buffer_.StopPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess", static_cast<int>(result == 0)); return result; } bool AudioDeviceModuleImpl::Playing() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->Playing(); } int32_t AudioDeviceModuleImpl::StartRecording() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); if (Recording()) { return 0; } audio_device_buffer_.StartRecording(); int32_t result = audio_device_->StartRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess", static_cast<int>(result == 0)); return result; } int32_t AudioDeviceModuleImpl::StopRecording() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); int32_t result = audio_device_->StopRecording();
audio_device_buffer_.StopRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_LOG(LS_INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess", static_cast<int>(result == 0)); return result; } bool AudioDeviceModuleImpl::Recording() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->Recording(); } int32_t AudioDeviceModuleImpl::RegisterAudioCallback( AudioTransport* audioCallback) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; return audio_device_buffer_.RegisterAudioCallback(audioCallback); } @@ -856,7 +856,7 @@ int32_t AudioDeviceModuleImpl::PlayoutDelay(uint16_t* delayMS) const { CHECKinitialized_(); uint16_t delay = 0; if (audio_device_->PlayoutDelay(delay) == -1) { - RTC_LOG(LERROR) << "failed to retrieve the playout delay"; + RTC_LOG(LS_ERROR) << "failed to retrieve the playout delay"; return -1; } *delayMS = delay; @@ -864,87 +864,87 @@ } bool AudioDeviceModuleImpl::BuiltInAECIsAvailable() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInAECIsAvailable(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleImpl::EnableBuiltInAEC(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInAEC(enable); - RTC_LOG(INFO) << "output: " << ok; + RTC_LOG(LS_INFO) << "output: " << ok; return ok; } bool AudioDeviceModuleImpl::BuiltInAGCIsAvailable() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInAGCIsAvailable(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleImpl::EnableBuiltInAGC(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInAGC(enable); - RTC_LOG(INFO) << "output: " << ok; + RTC_LOG(LS_INFO) << "output: " << ok; return ok; } bool AudioDeviceModuleImpl::BuiltInNSIsAvailable() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInNSIsAvailable(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_LOG(LS_INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleImpl::EnableBuiltInNS(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInNS(enable); - RTC_LOG(INFO) << "output: " << ok; + RTC_LOG(LS_INFO) << "output: " << ok; return ok; } int32_t AudioDeviceModuleImpl::GetPlayoutUnderrunCount() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; CHECKinitialized_(); int32_t underrunCount = audio_device_->GetPlayoutUnderrunCount(); - RTC_LOG(INFO) << "output: " << underrunCount; + RTC_LOG(LS_INFO) << "output: " << underrunCount; return underrunCount; } #if defined(WEBRTC_IOS) int AudioDeviceModuleImpl::GetPlayoutAudioParameters( AudioParameters* params) const { - RTC_LOG(INFO)
<< __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; int r = audio_device_->GetPlayoutAudioParameters(params); - RTC_LOG(INFO) << "output: " << r; + RTC_LOG(LS_INFO) << "output: " << r; return r; } int AudioDeviceModuleImpl::GetRecordAudioParameters( AudioParameters* params) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; int r = audio_device_->GetRecordAudioParameters(params); - RTC_LOG(INFO) << "output: " << r; + RTC_LOG(LS_INFO) << "output: " << r; return r; } #endif // WEBRTC_IOS AudioDeviceModuleImpl::PlatformType AudioDeviceModuleImpl::Platform() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; return platform_type_; } AudioDeviceModule::AudioLayer AudioDeviceModuleImpl::PlatformAudioLayer() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_LOG(LS_INFO) << __FUNCTION__; return audio_layer_; } diff --git a/modules/audio_device/audio_device_unittest.cc b/modules/audio_device/audio_device_unittest.cc index b0af9521c6..51a8575b66 100644 --- a/modules/audio_device/audio_device_unittest.cc +++ b/modules/audio_device/audio_device_unittest.cc @@ -162,21 +162,21 @@ class FifoAudioStream : public AudioStream { // channel configuration. No conversion is needed. std::copy(buffer.begin(), buffer.end(), destination.begin()); } else if (destination.size() == 2 * buffer.size()) { - // Recorded input signal in |buffer| is in mono. Do channel upmix to + // Recorded input signal in `buffer` is in mono. Do channel upmix to // match stereo output (1 -> 2). for (size_t i = 0; i < buffer.size(); ++i) { destination[2 * i] = buffer[i]; destination[2 * i + 1] = buffer[i]; } } else if (buffer.size() == 2 * destination.size()) { - // Recorded input signal in |buffer| is in stereo. Do channel downmix + // Recorded input signal in `buffer` is in stereo. Do channel downmix // to match mono output (2 -> 1). for (size_t i = 0; i < destination.size(); ++i) { destination[i] = (static_cast<int32_t>(buffer[2 * i]) + buffer[2 * i + 1]) / 2; } } else { - RTC_NOTREACHED() << "Required conversion is not support"; + RTC_DCHECK_NOTREACHED() << "Required conversion is not supported"; } fifo_.pop_front(); } @@ -219,7 +219,7 @@ class LatencyAudioStream : public AudioStream { write_thread_checker_.Detach(); } - // Insert periodic impulses in first two samples of |destination|. + // Insert periodic impulses in first two samples of `destination`. void Read(rtc::ArrayView<int16_t> destination) override { RTC_DCHECK_RUN_ON(&read_thread_checker_); if (read_count_ == 0) { @@ -240,7 +240,7 @@ } } - // Detect received impulses in |source|, derive time between transmission and + // Detect received impulses in `source`, derive time between transmission and // detection and add the calculated delay to the list of latencies. void Write(rtc::ArrayView<const int16_t> source) override { RTC_DCHECK_RUN_ON(&write_thread_checker_); @@ -249,7 +249,7 @@ write_count_++; if (!pulse_time_) { // Avoid detection of new impulse response until a new impulse has - // been transmitted (sets |pulse_time_| to value larger than zero). + // been transmitted (sets `pulse_time_` to a value larger than zero). return; } // Find index (element position in vector) of the max element. @@ -267,7 +267,7 @@ // Total latency is the difference between transmit time and detection // time plus the extra delay within the buffer in which we detected the // received impulse.
It is transmitted at sample 0 but can be received - at sample N where N > 0. The term |extra_delay| accounts for N and it + at sample N where N > 0. The term `extra_delay` accounts for N and it // is a value between 0 and 10ms. latencies_.push_back(now_time - *pulse_time_ + extra_delay); pulse_time_.reset(); @@ -586,7 +586,7 @@ class MAYBE_AudioDeviceTest rtc::scoped_refptr<AudioDeviceModuleForTest> CreateAudioDevice() { // Use the default factory for kPlatformDefaultAudio and a special factory // CreateWindowsCoreAudioAudioDeviceModuleForTest() for kWindowsCoreAudio2. - // The value of |audio_layer_| is set at construction by GetParam() and two + // The value of `audio_layer_` is set at construction by GetParam() and two // different layers are tested on Windows only. if (audio_layer_ == AudioDeviceModule::kPlatformDefaultAudio) { return AudioDeviceModule::CreateForTest(audio_layer_, diff --git a/modules/audio_device/dummy/file_audio_device.h b/modules/audio_device/dummy/file_audio_device.h index f4a6b76586..4d6858f992 100644 --- a/modules/audio_device/dummy/file_audio_device.h +++ b/modules/audio_device/dummy/file_audio_device.h @@ -28,8 +28,8 @@ namespace webrtc { // and plays out into a file. class FileAudioDevice : public AudioDeviceGeneric { public: - // Constructs a file audio device with |id|. It will read audio from - // |inputFilename| and record output audio to |outputFilename|. + // Constructs a file audio device with `id`. It will read audio from - wait + // `inputFilename` and record output audio to `outputFilename`. // // The input file should be a readable 48k stereo raw file, and the output // file should point to a writable location. The output format will also be diff --git a/modules/audio_device/fine_audio_buffer.cc b/modules/audio_device/fine_audio_buffer.cc index b4f3c371ac..86240da196 100644 --- a/modules/audio_device/fine_audio_buffer.cc +++ b/modules/audio_device/fine_audio_buffer.cc @@ -29,21 +29,21 @@ FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer) playout_channels_(audio_device_buffer->PlayoutChannels()), record_channels_(audio_device_buffer->RecordingChannels()) { RTC_DCHECK(audio_device_buffer_); - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (IsReadyForPlayout()) { - RTC_DLOG(INFO) << "playout_samples_per_channel_10ms: " - << playout_samples_per_channel_10ms_; - RTC_DLOG(INFO) << "playout_channels: " << playout_channels_; + RTC_DLOG(LS_INFO) << "playout_samples_per_channel_10ms: " + << playout_samples_per_channel_10ms_; + RTC_DLOG(LS_INFO) << "playout_channels: " << playout_channels_; } if (IsReadyForRecord()) { - RTC_DLOG(INFO) << "record_samples_per_channel_10ms: " - << record_samples_per_channel_10ms_; - RTC_DLOG(INFO) << "record_channels: " << record_channels_; + RTC_DLOG(LS_INFO) << "record_samples_per_channel_10ms: " + << record_samples_per_channel_10ms_; + RTC_DLOG(LS_INFO) << "record_channels: " << record_channels_; } } FineAudioBuffer::~FineAudioBuffer() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; } void FineAudioBuffer::ResetPlayout() { @@ -113,7 +113,7 @@ void FineAudioBuffer::DeliverRecordedData( record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size()); // Consume samples from buffer in chunks of 10ms until there is not // enough data left. The number of remaining samples in the cache is given by - // the new size of the internal |record_buffer_|. + // the new size of the internal `record_buffer_`.
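The consume-in-10ms-chunks loop that this comment describes reduces to the following sketch; std::vector stands in for the rtc::BufferT used by the real code, and the delivery callback is an assumption. The patch's own loop follows right after:

  #include <cstdint>
  #include <functional>
  #include <vector>

  // Sketch: hand off fixed 10 ms chunks while enough samples remain;
  // leftovers stay cached in the accumulation buffer for the next call.
  void DeliverInTenMsChunks(
      std::vector<int16_t>& cache,
      size_t num_elements_10ms,
      const std::function<void(const int16_t*, size_t)>& deliver) {
    while (cache.size() >= num_elements_10ms) {
      deliver(cache.data(), num_elements_10ms);
      cache.erase(cache.begin(), cache.begin() + num_elements_10ms);
    }
  }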
const size_t num_elements_10ms = record_channels_ * record_samples_per_channel_10ms_; while (record_buffer_.size() >= num_elements_10ms) { diff --git a/modules/audio_device/fine_audio_buffer.h b/modules/audio_device/fine_audio_buffer.h index 210eda834a..a6c3042bb2 100644 --- a/modules/audio_device/fine_audio_buffer.h +++ b/modules/audio_device/fine_audio_buffer.h @@ -29,7 +29,7 @@ class AudioDeviceBuffer; // accumulated 10ms worth of data to the ADB every second call. class FineAudioBuffer { public: - // |device_buffer| is a buffer that provides 10ms of audio data. + // `device_buffer` is a buffer that provides 10ms of audio data. FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer); ~FineAudioBuffer(); @@ -42,18 +42,18 @@ class FineAudioBuffer { bool IsReadyForPlayout() const; bool IsReadyForRecord() const; - // Copies audio samples into |audio_buffer| where number of requested - // elements is specified by |audio_buffer.size()|. The producer will always + // Copies audio samples into `audio_buffer` where number of requested + // elements is specified by `audio_buffer.size()`. The producer will always // fill up the audio buffer and if no audio exists, the buffer will contain - // silence instead. The provided delay estimate in |playout_delay_ms| should + // silence instead. The provided delay estimate in `playout_delay_ms` should // contain an estimate of the latency between when an audio frame is read from // WebRTC and when it is played out on the speaker. void GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer, int playout_delay_ms); - // Consumes the audio data in |audio_buffer| and sends it to the WebRTC layer + // Consumes the audio data in `audio_buffer` and sends it to the WebRTC layer // in chunks of 10ms. The sum of the provided delay estimate in - // |record_delay_ms| and the latest |playout_delay_ms| in GetPlayoutData() + // `record_delay_ms` and the latest `playout_delay_ms` in GetPlayoutData() // are given to the AEC in the audio processing module. // They can be fixed values on most platforms and they are ignored if an // external (hardware/built-in) AEC is used. @@ -72,11 +72,11 @@ class FineAudioBuffer { // time of this object. AudioDeviceBuffer* const audio_device_buffer_; // Number of audio samples per channel per 10ms. Set once at construction - // based on parameters in |audio_device_buffer|. + // based on parameters in `audio_device_buffer`. const size_t playout_samples_per_channel_10ms_; const size_t record_samples_per_channel_10ms_; // Number of audio channels. Set once at construction based on parameters in - // |audio_device_buffer|. + // `audio_device_buffer`. const size_t playout_channels_; const size_t record_channels_; // Storage for output samples from which a consumer can read audio buffers diff --git a/modules/audio_device/fine_audio_buffer_unittest.cc b/modules/audio_device/fine_audio_buffer_unittest.cc index 2199067098..36ea85f7dd 100644 --- a/modules/audio_device/fine_audio_buffer_unittest.cc +++ b/modules/audio_device/fine_audio_buffer_unittest.cc @@ -36,7 +36,7 @@ const int kSamplesPer10Ms = kSampleRate * 10 / 1000; // E.g. if there are two buffers of size 3, buffer 1 would contain 0,1,2 and // buffer 2 would contain 3,4,5. Note that SCHAR_MAX is 127 so wrap-around // will happen. -// |buffer| is the audio buffer to verify. +// `buffer` is the audio buffer to verify.
bool VerifyBuffer(const int16_t* buffer, int buffer_number, int size) { int start_value = (buffer_number * size) % SCHAR_MAX; for (int i = 0; i < size; ++i) { @@ -51,9 +51,9 @@ bool VerifyBuffer(const int16_t* buffer, int buffer_number, int size) { // called (which is done implicitly when calling GetBufferData). It writes the // sequence 0,1,..SCHAR_MAX-1,0,1,... to the buffer. Note that this is likely a // buffer of different size than the one VerifyBuffer verifies. -// |iteration| is the number of calls made to UpdateBuffer prior to this call. -// |samples_per_10_ms| is the number of samples that should be written to the -// buffer (|arg0|). +// `iteration` is the number of calls made to UpdateBuffer prior to this call. +// `samples_per_10_ms` is the number of samples that should be written to the +// buffer (`arg0`). ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) { int16_t* buffer = static_cast<int16_t*>(arg0); int start_value = (iteration * samples_per_10_ms) % SCHAR_MAX; @@ -64,7 +64,7 @@ ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) { return samples_per_10_ms / kChannels; } -// Writes a periodic ramp pattern to the supplied |buffer|. See UpdateBuffer() +// Writes a periodic ramp pattern to the supplied `buffer`. See UpdateBuffer() // for details. void UpdateInputBuffer(int16_t* buffer, int iteration, int size) { int start_value = (iteration * size) % SCHAR_MAX; @@ -74,7 +74,7 @@ ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) { } // Action macro which verifies that the recorded 10ms chunk of audio data -// (in |arg0|) contains the correct reference values even if they have been +// (in `arg0`) contains the correct reference values even if they have been // supplied using a buffer size that is smaller or larger than 10ms. // See VerifyBuffer() for details. ACTION_P2(VerifyInputBuffer, iteration, samples_per_10_ms) { diff --git a/modules/audio_device/g3doc/audio_device_module.md b/modules/audio_device/g3doc/audio_device_module.md index 3aa1a59d08..101b2e4cc8 100644 --- a/modules/audio_device/g3doc/audio_device_module.md +++ b/modules/audio_device/g3doc/audio_device_module.md @@ -53,7 +53,7 @@ The most up-to-date implementations of the ADM interface are for [iOS][27] and for [Android][28]. Desktop versions are not updated to comply with the latest -[C++ style guide](https://chromium.googlesource.com/chromium/src/+/master/styleguide/c++/c++.md) +[C++ style guide](https://chromium.googlesource.com/chromium/src/+/main/styleguide/c++/c++.md) and more work is also needed to improve the performance and stability of these versions. @@ -132,40 +132,40 @@ Finally, the corresponding stop methods [`AudioDeviceModule::StopRecording`][35] and [`AudioDeviceModule::StopPlayout`][36] are called followed by [`AudioDeviceModule::Terminate`][37].
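The call sequence this document spells out maps to a short usage sketch; error handling is omitted, and `task_queue_factory` and `transport` are assumed to point at valid webrtc::TaskQueueFactory and webrtc::AudioTransport implementations:

  #include "api/task_queue/task_queue_factory.h"
  #include "modules/audio_device/include/audio_device.h"

  // Sketch of the documented lifecycle: create the ADM, register the
  // transport, initialize, start, and later stop and terminate.
  void RunAdmLifecycle(webrtc::TaskQueueFactory* task_queue_factory,
                       webrtc::AudioTransport* transport) {
    rtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
        webrtc::AudioDeviceModule::Create(
            webrtc::AudioDeviceModule::kPlatformDefaultAudio,
            task_queue_factory);
    adm->RegisterAudioCallback(transport);
    adm->Init();
    adm->InitPlayout();
    adm->StartPlayout();
    // ... audio flows via the registered AudioTransport ...
    adm->StopPlayout();
    adm->Terminate();
  }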
-[1]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/media/engine/webrtc_voice_engine.cc;l=314;drc=f7b1b95f11c74cb5369fdd528b73c70a50f2e206 -[2]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/media/engine/webrtc_voice_engine.h;l=48;drc=d15a575ec3528c252419149d35977e55269d8a41 -[3]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/audio_device_unittest.cc;l=1;drc=d15a575ec3528c252419149d35977e55269d8a41 -[4]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=46;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e -[5]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/media/engine/adm_helpers.h;drc=2222a80e79ae1ef5cb9510ec51d3868be75f47a2 -[6]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=62;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[7]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=77;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[8]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=69;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[9]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=79;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[10]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=72;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[11]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=99;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[12]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=101;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[13]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=130;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[14]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=133;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[15]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=59;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[16]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=34;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[17]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=36;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[18]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=48;drc=9438fb3fff97c803d1ead34c0e4f223db168526f -[19]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738es -[20]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/sdk/objc/native/api/audio_device_module.h;drc=76443eafa9375374d9f1d23da2b913f2acac6ac2 
-[21]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/sdk/android/src/jni/audio_device/audio_device_module.h;drc=bbeb10925eb106eeed6143ccf571bc438ec22ce1 -[22]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/linux/;drc=d15a575ec3528c252419149d35977e55269d8a41 -[23]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/win/;drc=d15a575ec3528c252419149d35977e55269d8a41 -[24]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/mac/;drc=3b68aa346a5d3483c3448852d19d91723846825c -[25]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/create_peerconnection_factory.h;l=45;drc=09ceed2165137c4bea4e02e8d3db31970d0bf273 -[26]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/pc/peer_connection_interface_unittest.cc;l=692;drc=2efb8a5ec61b1b87475d046c03d20244f53b14b6 -[27]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/sdk/objc/native/api/audio_device_module.h;drc=76443eafa9375374d9f1d23da2b913f2acac6ac2 -[28]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/sdk/android/src/jni/audio_device/audio_device_module.h;drc=bbeb10925eb106eeed6143ccf571bc438ec22ce1 -[29]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/pc/test/fake_audio_capture_module.h;l=42;drc=d15a575ec3528c252419149d35977e55269d8a41 -[30]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/audio/audio_state.h;drc=d15a575ec3528c252419149d35977e55269d8a41 -[31]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=87;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e -[32]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=94;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e -[33]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=84;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e -[34]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=91;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e -[35]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=95;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e -[36]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=92;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e -[37]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_device/include/audio_device.h;l=63;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[1]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/webrtc_voice_engine.cc;l=314;drc=f7b1b95f11c74cb5369fdd528b73c70a50f2e206 +[2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/webrtc_voice_engine.h;l=48;drc=d15a575ec3528c252419149d35977e55269d8a41 +[3]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/audio_device_unittest.cc;l=1;drc=d15a575ec3528c252419149d35977e55269d8a41 +[4]: 
https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=46;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[5]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/adm_helpers.h;drc=2222a80e79ae1ef5cb9510ec51d3868be75f47a2 +[6]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=62;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[7]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=77;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[8]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=69;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[9]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=79;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[10]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=72;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[11]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=99;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[12]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=101;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[13]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=130;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[14]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=133;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[15]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=59;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[16]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=34;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[17]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=36;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[18]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device_defines.h;l=48;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[19]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738es +[20]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/objc/native/api/audio_device_module.h;drc=76443eafa9375374d9f1d23da2b913f2acac6ac2 +[21]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/android/src/jni/audio_device/audio_device_module.h;drc=bbeb10925eb106eeed6143ccf571bc438ec22ce1 +[22]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/linux/;drc=d15a575ec3528c252419149d35977e55269d8a41 +[23]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/win/;drc=d15a575ec3528c252419149d35977e55269d8a41 +[24]: 
https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/mac/;drc=3b68aa346a5d3483c3448852d19d91723846825c +[25]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/create_peerconnection_factory.h;l=45;drc=09ceed2165137c4bea4e02e8d3db31970d0bf273 +[26]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/peer_connection_interface_unittest.cc;l=692;drc=2efb8a5ec61b1b87475d046c03d20244f53b14b6 +[27]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/objc/native/api/audio_device_module.h;drc=76443eafa9375374d9f1d23da2b913f2acac6ac2 +[28]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/sdk/android/src/jni/audio_device/audio_device_module.h;drc=bbeb10925eb106eeed6143ccf571bc438ec22ce1 +[29]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/test/fake_audio_capture_module.h;l=42;drc=d15a575ec3528c252419149d35977e55269d8a41 +[30]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/audio/audio_state.h;drc=d15a575ec3528c252419149d35977e55269d8a41 +[31]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=87;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[32]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=94;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[33]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=84;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[34]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=91;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[35]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=95;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[36]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=92;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e +[37]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_device/include/audio_device.h;l=63;drc=eb8c4ca608486add9800f6bfb7a8ba3cf23e738e diff --git a/modules/audio_device/include/audio_device_data_observer.h b/modules/audio_device/include/audio_device_data_observer.h index b59cafcb5d..36dc45f19e 100644 --- a/modules/audio_device/include/audio_device_data_observer.h +++ b/modules/audio_device/include/audio_device_data_observer.h @@ -26,16 +26,16 @@ namespace webrtc { class AudioDeviceDataObserver { public: virtual void OnCaptureData(const void* audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) = 0; + size_t num_samples, + size_t bytes_per_sample, + size_t num_channels, + uint32_t samples_per_sec) = 0; virtual void OnRenderData(const void* audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) = 0; + size_t num_samples, + size_t bytes_per_sample, + size_t num_channels, + uint32_t samples_per_sec) = 0; AudioDeviceDataObserver() = default; virtual ~AudioDeviceDataObserver() = default; @@ -56,14 +56,14 @@ rtc::scoped_refptr CreateAudioDeviceWithDataObserver( // Creates an ADM instance with 
AudioDeviceDataObserver registered. rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver( - const AudioDeviceModule::AudioLayer audio_layer, + AudioDeviceModule::AudioLayer audio_layer, TaskQueueFactory* task_queue_factory, std::unique_ptr<AudioDeviceDataObserver> observer); // Creates an ADM instance with AudioDeviceDataObserver registered. ABSL_DEPRECATED("") rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver( - const AudioDeviceModule::AudioLayer audio_layer, + AudioDeviceModule::AudioLayer audio_layer, TaskQueueFactory* task_queue_factory, AudioDeviceDataObserver* observer); diff --git a/modules/audio_device/include/audio_device_defines.h b/modules/audio_device/include/audio_device_defines.h index 01129a47a9..89d33f8538 100644 --- a/modules/audio_device/include/audio_device_defines.h +++ b/modules/audio_device/include/audio_device_defines.h @@ -33,22 +33,43 @@ static const int kAdmMaxPlayoutBufferSizeMs = 250; class AudioTransport { public: + // TODO(bugs.webrtc.org/13620) Deprecate this function. virtual int32_t RecordedDataIsAvailable(const void* audioSamples, - const size_t nSamples, - const size_t nBytesPerSample, - const size_t nChannels, - const uint32_t samplesPerSec, - const uint32_t totalDelayMS, - const int32_t clockDrift, - const uint32_t currentMicLevel, - const bool keyPressed, + size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samplesPerSec, + uint32_t totalDelayMS, + int32_t clockDrift, + uint32_t currentMicLevel, + bool keyPressed, uint32_t& newMicLevel) = 0; // NOLINT + virtual int32_t RecordedDataIsAvailable( + const void* audioSamples, + size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samplesPerSec, + uint32_t totalDelayMS, + int32_t clockDrift, + uint32_t currentMicLevel, + bool keyPressed, + uint32_t& newMicLevel, + int64_t estimatedCaptureTimeNS) { // NOLINT + // TODO(webrtc:13620) Make the default behavior of the new API behave as + // the old API. This can be made pure virtual once all uses of the old + // API are removed. + return RecordedDataIsAvailable( + audioSamples, nSamples, nBytesPerSample, nChannels, samplesPerSec, + totalDelayMS, clockDrift, currentMicLevel, keyPressed, newMicLevel); + } + // Implementation has to set up safe values for all specified out parameters.
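The new `RecordedDataIsAvailable` overload above stays source-compatible by giving the new virtual a default body that forwards to the old pure virtual. A self-contained sketch of that pattern; `AudioSink` is a hypothetical stand-in, not the real interface:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

class AudioSink {
 public:
  virtual ~AudioSink() = default;
  // Old API: pure virtual, so every existing implementer provides it.
  virtual int32_t OnData(const int16_t* samples, size_t num_samples) = 0;
  // New API: the default body forwards to the old API, dropping the new
  // argument, so legacy subclasses keep compiling and behaving as before.
  virtual int32_t OnData(const int16_t* samples, size_t num_samples,
                         int64_t estimated_capture_time_ns) {
    (void)estimated_capture_time_ns;
    return OnData(samples, num_samples);
  }
};

class LegacySink : public AudioSink {
 public:
  using AudioSink::OnData;  // Keep the new overload visible despite hiding.
  int32_t OnData(const int16_t* /*samples*/, size_t num_samples) override {
    std::printf("got %zu samples\n", num_samples);
    return 0;
  }
};

int main() {
  LegacySink sink;
  int16_t buf[480] = {};
  // Callers may use the new overload; it falls through to the old one.
  return static_cast<int>(sink.OnData(buf, 480,
                                      /*estimated_capture_time_ns=*/0));
}
```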
- virtual int32_t NeedMorePlayData(const size_t nSamples, - const size_t nBytesPerSample, - const size_t nChannels, - const uint32_t samplesPerSec, + virtual int32_t NeedMorePlayData(size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samplesPerSec, void* audioSamples, size_t& nSamplesOut, // NOLINT int64_t* elapsed_time_ms, diff --git a/modules/audio_device/include/audio_device_factory.cc b/modules/audio_device/include/audio_device_factory.cc index d5b381029e..130e096e6d 100644 --- a/modules/audio_device/include/audio_device_factory.cc +++ b/modules/audio_device/include/audio_device_factory.cc @@ -27,7 +27,7 @@ namespace webrtc { rtc::scoped_refptr CreateWindowsCoreAudioAudioDeviceModule( TaskQueueFactory* task_queue_factory, bool automatic_restart) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return CreateWindowsCoreAudioAudioDeviceModuleForTest(task_queue_factory, automatic_restart); } @@ -36,7 +36,7 @@ rtc::scoped_refptr CreateWindowsCoreAudioAudioDeviceModuleForTest( TaskQueueFactory* task_queue_factory, bool automatic_restart) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Returns NULL if Core Audio is not supported or if COM has not been // initialized correctly using ScopedCOMInitializer. if (!webrtc_win::core_audio_utility::IsSupported()) { diff --git a/modules/audio_device/include/audio_device_factory.h b/modules/audio_device/include/audio_device_factory.h index 9c19d6196d..edd7686b8e 100644 --- a/modules/audio_device/include/audio_device_factory.h +++ b/modules/audio_device/include/audio_device_factory.h @@ -20,7 +20,7 @@ namespace webrtc { // Creates an AudioDeviceModule (ADM) for Windows based on the Core Audio API. // The creating thread must be a COM thread; otherwise nullptr will be returned. -// By default |automatic_restart| is set to true and it results in support for +// By default `automatic_restart` is set to true and it results in support for // automatic restart of audio if e.g. the existing device is removed. If set to // false, no attempt to restart audio is performed under these conditions. 
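As a usage sketch for the Windows factory documented above (assuming a WebRTC checkout; the return type in this tree is taken to be `rtc::scoped_refptr<webrtc::AudioDeviceModule>`):

```cpp
#include "api/task_queue/default_task_queue_factory.h"
#include "modules/audio_device/include/audio_device_factory.h"

// The calling thread must already be a COM-initialized (MTA) thread, as the
// comment above notes; otherwise the factory returns nullptr.
rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateWinAdm(
    webrtc::TaskQueueFactory* task_queue_factory) {
  return webrtc::CreateWindowsCoreAudioAudioDeviceModule(
      task_queue_factory, /*automatic_restart=*/true);
}
```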
// diff --git a/modules/audio_device/include/mock_audio_transport.h b/modules/audio_device/include/mock_audio_transport.h index 8f71a2d71f..e1be5f422f 100644 --- a/modules/audio_device/include/mock_audio_transport.h +++ b/modules/audio_device/include/mock_audio_transport.h @@ -25,23 +25,38 @@ class MockAudioTransport : public AudioTransport { MOCK_METHOD(int32_t, RecordedDataIsAvailable, (const void* audioSamples, - const size_t nSamples, - const size_t nBytesPerSample, - const size_t nChannels, - const uint32_t samplesPerSec, - const uint32_t totalDelayMS, - const int32_t clockDrift, - const uint32_t currentMicLevel, - const bool keyPressed, + size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samplesPerSec, + uint32_t totalDelayMS, + int32_t clockDrift, + uint32_t currentMicLevel, + bool keyPressed, uint32_t& newMicLevel), (override)); + MOCK_METHOD(int32_t, + RecordedDataIsAvailable, + (const void* audioSamples, + size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samplesPerSec, + uint32_t totalDelayMS, + int32_t clockDrift, + uint32_t currentMicLevel, + bool keyPressed, + uint32_t& newMicLevel, + int64_t estimated_capture_time_ns), + (override)); + MOCK_METHOD(int32_t, NeedMorePlayData, - (const size_t nSamples, - const size_t nBytesPerSample, - const size_t nChannels, - const uint32_t samplesPerSec, + (size_t nSamples, + size_t nBytesPerSample, + size_t nChannels, + uint32_t samplesPerSec, void* audioSamples, size_t& nSamplesOut, int64_t* elapsed_time_ms, diff --git a/modules/audio_device/include/test_audio_device.cc b/modules/audio_device/include/test_audio_device.cc index 8351e8a405..d8ab22f29d 100644 --- a/modules/audio_device/include/test_audio_device.cc +++ b/modules/audio_device/include/test_audio_device.cc @@ -48,10 +48,10 @@ class TestAudioDeviceModuleImpl : public webrtc_impl::AudioDeviceModuleDefault { public: // Creates a new TestAudioDeviceModule. When capturing or playing, 10 ms audio - // frames will be processed every 10ms / |speed|. - // |capturer| is an object that produces audio data. Can be nullptr if this + // frames will be processed every 10ms / `speed`. + // `capturer` is an object that produces audio data. Can be nullptr if this // device is never used for recording. - // |renderer| is an object that receives audio data that would have been + // `renderer` is an object that receives audio data that would have been // played out. Can be nullptr if this device is never used for playing. // Use one of the Create... functions to get these instances. TestAudioDeviceModuleImpl(TaskQueueFactory* task_queue_factory, @@ -142,13 +142,13 @@ class TestAudioDeviceModuleImpl } // Blocks until the Renderer refuses to receive data. - // Returns false if |timeout_ms| passes before that happens. + // Returns false if `timeout_ms` passes before that happens. bool WaitForPlayoutEnd(int timeout_ms = rtc::Event::kForever) override { return done_rendering_.Wait(timeout_ms); } // Blocks until the Recorder stops producing data. - // Returns false if |timeout_ms| passes before that happens. + // Returns false if `timeout_ms` passes before that happens. 
bool WaitForRecordingEnd(int timeout_ms = rtc::Event::kForever) override { return done_capturing_.Wait(timeout_ms); } diff --git a/modules/audio_device/include/test_audio_device.h b/modules/audio_device/include/test_audio_device.h index 48888a4b51..fd006a3558 100644 --- a/modules/audio_device/include/test_audio_device.h +++ b/modules/audio_device/include/test_audio_device.h @@ -42,7 +42,7 @@ class TestAudioDeviceModule : public AudioDeviceModule { virtual int SamplingFrequency() const = 0; // Returns the number of channels of captured audio data. virtual int NumChannels() const = 0; - // Replaces the contents of |buffer| with 10ms of captured audio data + // Replaces the contents of `buffer` with 10ms of captured audio data // (see TestAudioDeviceModule::SamplesPerFrame). Returns true if the // capturer can keep producing data, or false when the capture finishes. virtual bool Capture(rtc::BufferT<int16_t>* buffer) = 0; @@ -73,10 +73,10 @@ class TestAudioDeviceModule : public AudioDeviceModule { ~TestAudioDeviceModule() override {} // Creates a new TestAudioDeviceModule. When capturing or playing, 10 ms audio - // frames will be processed every 10ms / |speed|. - // |capturer| is an object that produces audio data. Can be nullptr if this + // frames will be processed every 10ms / `speed`. + // `capturer` is an object that produces audio data. Can be nullptr if this // device is never used for recording. - // |renderer| is an object that receives audio data that would have been + // `renderer` is an object that receives audio data that would have been // played out. Can be nullptr if this device is never used for playing. // Use one of the Create... functions to get these instances. static rtc::scoped_refptr<TestAudioDeviceModule> Create( @@ -85,9 +85,9 @@ class TestAudioDeviceModule : public AudioDeviceModule { std::unique_ptr<Renderer> renderer, float speed = 1); - // Returns a Capturer instance that generates a signal of |num_channels| + // Returns a Capturer instance that generates a signal of `num_channels` // channels where every second frame is zero and every second frame is evenly - // distributed random noise with max amplitude |max_amplitude|. + // distributed random noise with max amplitude `max_amplitude`. static std::unique_ptr<PulsedNoiseCapturer> CreatePulsedNoiseCapturer( int16_t max_amplitude, int sampling_frequency_in_hz, @@ -109,7 +109,7 @@ class TestAudioDeviceModule : public AudioDeviceModule { // Returns a Capturer instance that gets its data from a file. // Automatically detects sample rate and num of channels. - // |repeat| - if true, the file will be replayed from the start when we reach + // `repeat` - if true, the file will be replayed from the start when we reach // the end of file. static std::unique_ptr<Capturer> CreateWavFileReader(std::string filename, bool repeat = false); @@ -140,10 +140,10 @@ class TestAudioDeviceModule : public AudioDeviceModule { bool Recording() const override = 0; // Blocks until the Renderer refuses to receive data. - // Returns false if |timeout_ms| passes before that happens. + // Returns false if `timeout_ms` passes before that happens. virtual bool WaitForPlayoutEnd(int timeout_ms = rtc::Event::kForever) = 0; // Blocks until the Recorder stops producing data. - // Returns false if |timeout_ms| passes before that happens. + // Returns false if `timeout_ms` passes before that happens.
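Putting the factories above together, a hedged wiring sketch (assumes the WebRTC tree; `CreateDiscardRenderer` is taken to be a sibling factory on the same class):

```cpp
#include <memory>
#include <utility>

#include "modules/audio_device/include/test_audio_device.h"

rtc::scoped_refptr<webrtc::TestAudioDeviceModule> MakeTestAdm(
    webrtc::TaskQueueFactory* task_queue_factory) {
  // Alternating silent/noise frames: 48 kHz, max amplitude 1000.
  auto capturer = webrtc::TestAudioDeviceModule::CreatePulsedNoiseCapturer(
      /*max_amplitude=*/1000, /*sampling_frequency_in_hz=*/48000);
  // Discard whatever would have been played out.
  auto renderer = webrtc::TestAudioDeviceModule::CreateDiscardRenderer(
      /*sampling_frequency_in_hz=*/48000);
  // speed = 1 processes one 10 ms frame per real-time 10 ms.
  return webrtc::TestAudioDeviceModule::Create(task_queue_factory,
                                               std::move(capturer),
                                               std::move(renderer),
                                               /*speed=*/1.0f);
}
```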
virtual bool WaitForRecordingEnd(int timeout_ms = rtc::Event::kForever) = 0; }; diff --git a/modules/audio_device/linux/audio_device_alsa_linux.cc b/modules/audio_device/linux/audio_device_alsa_linux.cc index 60e01e1239..50cf3beb6c 100644 --- a/modules/audio_device/linux/audio_device_alsa_linux.cc +++ b/modules/audio_device/linux/audio_device_alsa_linux.cc @@ -10,7 +10,6 @@ #include "modules/audio_device/linux/audio_device_alsa_linux.h" -#include #include "modules/audio_device/audio_device_config.h" #include "rtc_base/logging.h" @@ -542,8 +541,6 @@ int32_t AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available) { int32_t AudioDeviceLinuxALSA::SetMicrophoneVolume(uint32_t volume) { return (_mixerManager.SetMicrophoneVolume(volume)); - - return 0; } int32_t AudioDeviceLinuxALSA::MicrophoneVolume(uint32_t& volume) const { @@ -856,8 +853,6 @@ int32_t AudioDeviceLinuxALSA::InitPlayoutLocked() { } else { return -1; } - - return 0; } int32_t AudioDeviceLinuxALSA::InitRecording() { @@ -1000,8 +995,6 @@ int32_t AudioDeviceLinuxALSA::InitRecordingLocked() { } else { return -1; } - - return 0; } int32_t AudioDeviceLinuxALSA::StartRecording() { diff --git a/modules/audio_device/linux/audio_device_alsa_linux.h b/modules/audio_device/linux/audio_device_alsa_linux.h index 1f4a231640..23e21d3ce9 100644 --- a/modules/audio_device/linux/audio_device_alsa_linux.h +++ b/modules/audio_device/linux/audio_device_alsa_linux.h @@ -131,11 +131,11 @@ class AudioDeviceLinuxALSA : public AudioDeviceGeneric { int32_t InitPlayoutLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); int32_t InitSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); int32_t InitMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - int32_t GetDevicesInfo(const int32_t function, - const bool playback, - const int32_t enumDeviceNo = 0, + int32_t GetDevicesInfo(int32_t function, + bool playback, + int32_t enumDeviceNo = 0, char* enumDeviceName = NULL, - const int32_t ednLen = 0) const; + int32_t ednLen = 0) const; int32_t ErrorRecovery(int32_t error, snd_pcm_t* deviceHandle); bool KeyPressed() const; diff --git a/modules/audio_device/linux/audio_device_pulse_linux.cc b/modules/audio_device/linux/audio_device_pulse_linux.cc index 7742420fc2..4876c0fb91 100644 --- a/modules/audio_device/linux/audio_device_pulse_linux.cc +++ b/modules/audio_device/linux/audio_device_pulse_linux.cc @@ -1169,7 +1169,7 @@ int32_t AudioDeviceLinuxPulse::StartPlayout() { _startPlay = true; } - // Both |_startPlay| and |_playing| needs protction since they are also + // Both `_startPlay` and `_playing` need protection since they are also // accessed on the playout thread. // The audio thread will signal when playout has started. diff --git a/modules/audio_device/linux/latebindingsymboltable_linux.h b/modules/audio_device/linux/latebindingsymboltable_linux.h index 6cfb659749..9484b075ef 100644 --- a/modules/audio_device/linux/latebindingsymboltable_linux.h +++ b/modules/audio_device/linux/latebindingsymboltable_linux.h @@ -15,7 +15,6 @@ #include #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" // This file provides macros for creating "symbol table" classes to simplify the // dynamic loading of symbols from DLLs. Currently the implementation only
Currently the implementation only @@ -55,6 +54,9 @@ class LateBindingSymbolTable { ~LateBindingSymbolTable() { Unload(); } + LateBindingSymbolTable(const LateBindingSymbolTable&) = delete; + LateBindingSymbolTable& operator=(LateBindingSymbolTable&) = delete; + static int NumSymbols() { return SYMBOL_TABLE_SIZE; } // We do not use this, but we offer it for theoretical convenience. @@ -109,8 +111,6 @@ class LateBindingSymbolTable { DllHandle handle_; bool undefined_symbols_; void* symbols_[SYMBOL_TABLE_SIZE]; - - RTC_DISALLOW_COPY_AND_ASSIGN(LateBindingSymbolTable); }; // This macro must be invoked in a header to declare a symbol table class. diff --git a/modules/audio_device/mac/audio_device_mac.cc b/modules/audio_device/mac/audio_device_mac.cc index 2088b017a0..bf324da1a7 100644 --- a/modules/audio_device/mac/audio_device_mac.cc +++ b/modules/audio_device/mac/audio_device_mac.cc @@ -11,7 +11,6 @@ #include "modules/audio_device/mac/audio_device_mac.h" #include -#include // OSAtomicCompareAndSwap() #include // mach_task_self() #include // sysctlbyname() @@ -53,24 +52,6 @@ namespace webrtc { enum { MaxNumberDevices = 64 }; -void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) { - while (1) { - int32_t oldValue = *theValue; - if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue) == true) { - return; - } - } -} - -int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) { - while (1) { - int32_t value = *theValue; - if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true) { - return value; - } - } -} - // CoreAudio errors are best interpreted as four character strings. void AudioDeviceMac::logCAMsg(const rtc::LoggingSeverity sev, const char* msg, @@ -1339,7 +1320,7 @@ int32_t AudioDeviceMac::StopRecording() { } OSStatus err = noErr; - int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive); + int32_t captureDeviceIsAlive = _captureDeviceIsAlive; if (_twoDevices && captureDeviceIsAlive == 1) { // Recording side uses its own dedicated device and IOProc. if (_recording) { @@ -1365,7 +1346,7 @@ int32_t AudioDeviceMac::StopRecording() { } else { // We signal a stop for a shared device even when rendering has // not yet ended. This is to ensure the IOProc will return early as - // intended (by checking |_recording|) before accessing + // intended (by checking `_recording`) before accessing // resources we free below (e.g. the capture converter). // // In the case of a shared devcie, the IOProc will verify @@ -1395,7 +1376,7 @@ int32_t AudioDeviceMac::StopRecording() { } // Setting this signal will allow the worker thread to be stopped. - AtomicSet32(&_captureDeviceIsAlive, 0); + _captureDeviceIsAlive = 0; if (!capture_worker_thread_.empty()) { mutex_.Unlock(); @@ -1472,11 +1453,11 @@ int32_t AudioDeviceMac::StopPlayout() { } OSStatus err = noErr; - int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive); + int32_t renderDeviceIsAlive = _renderDeviceIsAlive; if (_playing && renderDeviceIsAlive == 1) { // We signal a stop for a shared device even when capturing has not // yet ended. This is to ensure the IOProc will return early as - // intended (by checking |_playing|) before accessing resources we + // intended (by checking `_playing`) before accessing resources we // free below (e.g. the render converter). // // In the case of a shared device, the IOProc will verify capturing @@ -1509,7 +1490,7 @@ int32_t AudioDeviceMac::StopPlayout() { } // Setting this signal will allow the worker thread to be stopped. 
- AtomicSet32(&_renderDeviceIsAlive, 0); + _renderDeviceIsAlive = 0; if (!render_worker_thread_.empty()) { mutex_.Unlock(); render_worker_thread_.Finalize(); @@ -1545,7 +1526,7 @@ int32_t AudioDeviceMac::StopPlayout() { } int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const { - int32_t renderDelayUs = AtomicGet32(&_renderDelayUs); + int32_t renderDelayUs = _renderDelayUs; delayMS = static_cast(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5); return 0; @@ -1954,7 +1935,7 @@ int32_t AudioDeviceMac::HandleDeviceChange() { if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) { RTC_LOG(LS_WARNING) << "Capture device is not alive (probably removed)"; - AtomicSet32(&_captureDeviceIsAlive, 0); + _captureDeviceIsAlive = 0; _mixerManager.CloseMicrophone(); } else if (err != noErr) { logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()", @@ -1973,7 +1954,7 @@ int32_t AudioDeviceMac::HandleDeviceChange() { if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) { RTC_LOG(LS_WARNING) << "Render device is not alive (probably removed)"; - AtomicSet32(&_renderDeviceIsAlive, 0); + _renderDeviceIsAlive = 0; _mixerManager.CloseSpeaker(); } else if (err != noErr) { logCAMsg(rtc::LS_ERROR, "Error in AudioDeviceGetProperty()", @@ -2248,7 +2229,7 @@ OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData, _outDesiredFormat.mSampleRate + 0.5); - AtomicSet32(&_renderDelayUs, renderDelayUs); + _renderDelayUs = renderDelayUs; return 0; } @@ -2317,7 +2298,7 @@ OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData, _inStreamFormat.mSampleRate + 0.5); - AtomicSet32(&_captureDelayUs, captureDelayUs); + _captureDelayUs = captureDelayUs; RTC_DCHECK(inputData->mNumberBuffers == 1); PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize * @@ -2347,7 +2328,7 @@ OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets, kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout); if (kernErr == KERN_OPERATION_TIMED_OUT) { - int32_t signal = AtomicGet32(&_captureDeviceIsAlive); + int32_t signal = _captureDeviceIsAlive; if (signal == 0) { // The capture device is no longer alive; stop the worker thread. *numberDataPackets = 0; @@ -2386,7 +2367,7 @@ bool AudioDeviceMac::RenderWorkerThread() { kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout); if (kernErr == KERN_OPERATION_TIMED_OUT) { - int32_t signal = AtomicGet32(&_renderDeviceIsAlive); + int32_t signal = _renderDeviceIsAlive; if (signal == 0) { // The render device is no longer alive; stop the worker thread. 
return false; @@ -2471,8 +2452,8 @@ bool AudioDeviceMac::CaptureWorkerThread() { int32_t msecOnPlaySide; int32_t msecOnRecordSide; - int32_t captureDelayUs = AtomicGet32(&_captureDelayUs); - int32_t renderDelayUs = AtomicGet32(&_renderDelayUs); + int32_t captureDelayUs = _captureDelayUs; + int32_t renderDelayUs = _renderDelayUs; msecOnPlaySide = static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5); diff --git a/modules/audio_device/mac/audio_device_mac.h b/modules/audio_device/mac/audio_device_mac.h index f9504b64b5..fe0d3beed8 100644 --- a/modules/audio_device/mac/audio_device_mac.h +++ b/modules/audio_device/mac/audio_device_mac.h @@ -15,6 +15,7 @@ #include #include +#include <atomic> #include #include "modules/audio_device/audio_device_generic.h" @@ -168,16 +169,16 @@ class AudioDeviceMac : public AudioDeviceGeneric { static void AtomicSet32(int32_t* theValue, int32_t newValue); static int32_t AtomicGet32(int32_t* theValue); - static void logCAMsg(const rtc::LoggingSeverity sev, + static void logCAMsg(rtc::LoggingSeverity sev, const char* msg, const char* err); - int32_t GetNumberDevices(const AudioObjectPropertyScope scope, + int32_t GetNumberDevices(AudioObjectPropertyScope scope, AudioDeviceID scopedDeviceIds[], - const uint32_t deviceListLength); + uint32_t deviceListLength); - int32_t GetDeviceName(const AudioObjectPropertyScope scope, - const uint16_t index, + int32_t GetDeviceName(AudioObjectPropertyScope scope, + uint16_t index, char* name); int32_t InitDevice(uint16_t userDeviceIndex, @@ -303,8 +304,8 @@ class AudioDeviceMac : public AudioDeviceGeneric { bool _playIsInitialized; // Atomically set variables - int32_t _renderDeviceIsAlive; - int32_t _captureDeviceIsAlive; + std::atomic<int32_t> _renderDeviceIsAlive; + std::atomic<int32_t> _captureDeviceIsAlive; bool _twoDevices; bool _doStop; // For play if not shared device or play+rec if shared device @@ -324,8 +325,8 @@ class AudioDeviceMac : public AudioDeviceGeneric { uint32_t _renderLatencyUs; // Atomically set variables - mutable int32_t _captureDelayUs; - mutable int32_t _renderDelayUs; + mutable std::atomic<int32_t> _captureDelayUs; + mutable std::atomic<int32_t> _renderDelayUs; int32_t _renderDelayOffsetSamples; diff --git a/modules/audio_device/mac/audio_mixer_manager_mac.h b/modules/audio_device/mac/audio_mixer_manager_mac.h index 17e34ff17d..0ccab4879b 100644 --- a/modules/audio_device/mac/audio_mixer_manager_mac.h +++ b/modules/audio_device/mac/audio_mixer_manager_mac.h @@ -54,7 +54,7 @@ class AudioMixerManagerMac { private: int32_t CloseSpeakerLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); int32_t CloseMicrophoneLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - static void logCAMsg(const rtc::LoggingSeverity sev, + static void logCAMsg(rtc::LoggingSeverity sev, const char* msg, const char* err); diff --git a/modules/audio_device/win/audio_device_core_win.cc b/modules/audio_device/win/audio_device_core_win.cc index a3723edb56..096e49cc12 100644 --- a/modules/audio_device/win/audio_device_core_win.cc +++ b/modules/audio_device/win/audio_device_core_win.cc @@ -25,7 +25,6 @@ #include "modules/audio_device/win/audio_device_core_win.h" -#include #include #include @@ -344,33 +343,33 @@ bool AudioDeviceWindowsCore::CoreAudioIsSupported() { // ---------------------------------------------------------------------------- AudioDeviceWindowsCore::AudioDeviceWindowsCore() - : _avrtLibrary(NULL), + : _avrtLibrary(nullptr), _winSupportAvrt(false), _comInit(ScopedCOMInitializer::kMTA), - _ptrAudioBuffer(NULL), - _ptrEnumerator(NULL), - _ptrRenderCollection(NULL), -
_ptrCaptureCollection(NULL), - _ptrDeviceOut(NULL), - _ptrDeviceIn(NULL), - _ptrClientOut(NULL), - _ptrClientIn(NULL), - _ptrRenderClient(NULL), - _ptrCaptureClient(NULL), - _ptrCaptureVolume(NULL), - _ptrRenderSimpleVolume(NULL), - _dmo(NULL), - _mediaBuffer(NULL), + _ptrAudioBuffer(nullptr), + _ptrEnumerator(nullptr), + _ptrRenderCollection(nullptr), + _ptrCaptureCollection(nullptr), + _ptrDeviceOut(nullptr), + _ptrDeviceIn(nullptr), + _ptrClientOut(nullptr), + _ptrClientIn(nullptr), + _ptrRenderClient(nullptr), + _ptrCaptureClient(nullptr), + _ptrCaptureVolume(nullptr), + _ptrRenderSimpleVolume(nullptr), + _dmo(nullptr), + _mediaBuffer(nullptr), _builtInAecEnabled(false), - _hRenderSamplesReadyEvent(NULL), - _hPlayThread(NULL), - _hRenderStartedEvent(NULL), - _hShutdownRenderEvent(NULL), - _hCaptureSamplesReadyEvent(NULL), - _hRecThread(NULL), - _hCaptureStartedEvent(NULL), - _hShutdownCaptureEvent(NULL), - _hMmTask(NULL), + _hRenderSamplesReadyEvent(nullptr), + _hPlayThread(nullptr), + _hRenderStartedEvent(nullptr), + _hShutdownRenderEvent(nullptr), + _hCaptureSamplesReadyEvent(nullptr), + _hRecThread(nullptr), + _hCaptureStartedEvent(nullptr), + _hShutdownCaptureEvent(nullptr), + _hMmTask(nullptr), _playAudioFrameSize(0), _playSampleRate(0), _playBlockSize(0), @@ -1888,18 +1887,18 @@ int32_t AudioDeviceWindowsCore::InitPlayout() { break; } else { if (pWfxClosestMatch) { - RTC_LOG(INFO) << "nChannels=" << Wfx.nChannels - << ", nSamplesPerSec=" << Wfx.nSamplesPerSec - << " is not supported. Closest match: " - "nChannels=" - << pWfxClosestMatch->nChannels << ", nSamplesPerSec=" - << pWfxClosestMatch->nSamplesPerSec; + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.nChannels + << ", nSamplesPerSec=" << Wfx.nSamplesPerSec + << " is not supported. Closest match: " + "nChannels=" + << pWfxClosestMatch->nChannels << ", nSamplesPerSec=" + << pWfxClosestMatch->nSamplesPerSec; CoTaskMemFree(pWfxClosestMatch); pWfxClosestMatch = NULL; } else { - RTC_LOG(INFO) << "nChannels=" << Wfx.nChannels - << ", nSamplesPerSec=" << Wfx.nSamplesPerSec - << " is not supported. No closest match."; + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.nChannels + << ", nSamplesPerSec=" << Wfx.nSamplesPerSec + << " is not supported. No closest match."; } } } @@ -2209,18 +2208,18 @@ int32_t AudioDeviceWindowsCore::InitRecording() { break; } else { if (pWfxClosestMatch) { - RTC_LOG(INFO) << "nChannels=" << Wfx.Format.nChannels - << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec - << " is not supported. Closest match: " - "nChannels=" - << pWfxClosestMatch->nChannels << ", nSamplesPerSec=" - << pWfxClosestMatch->nSamplesPerSec; + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.Format.nChannels + << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec + << " is not supported. Closest match: " + "nChannels=" + << pWfxClosestMatch->nChannels << ", nSamplesPerSec=" + << pWfxClosestMatch->nSamplesPerSec; CoTaskMemFree(pWfxClosestMatch); pWfxClosestMatch = NULL; } else { - RTC_LOG(INFO) << "nChannels=" << Wfx.Format.nChannels - << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec - << " is not supported. No closest match."; + RTC_LOG(LS_INFO) << "nChannels=" << Wfx.Format.nChannels + << ", nSamplesPerSec=" << Wfx.Format.nSamplesPerSec + << " is not supported. 
No closest match."; } } } @@ -2356,7 +2355,7 @@ int32_t AudioDeviceWindowsCore::StartRecording() { } } - RTC_DCHECK(_hRecThread); + RTC_DCHECK(_hRecThread == NULL); _hRecThread = CreateThread(NULL, 0, lpStartAddress, this, 0, NULL); if (_hRecThread == NULL) { RTC_LOG(LS_ERROR) << "failed to create the recording thread"; @@ -2493,7 +2492,7 @@ int32_t AudioDeviceWindowsCore::StartPlayout() { MutexLock lockScoped(&mutex_); // Create thread which will drive the rendering. - RTC_DCHECK(_hPlayThread); + RTC_DCHECK(_hPlayThread == NULL); _hPlayThread = CreateThread(NULL, 0, WSAPIRenderThread, this, 0, NULL); if (_hPlayThread == NULL) { RTC_LOG(LS_ERROR) << "failed to create the playout thread"; @@ -3001,8 +3000,8 @@ DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() { dmoBuffer.pBuffer->AddRef(); // Poll the DMO for AEC processed capture data. The DMO will - // copy available data to |dmoBuffer|, and should only return - // 10 ms frames. The value of |dwStatus| should be ignored. + // copy available data to `dmoBuffer`, and should only return + // 10 ms frames. The value of `dwStatus` should be ignored. hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus); SAFE_RELEASE(dmoBuffer.pBuffer); dwStatus = dmoBuffer.dwStatus; @@ -3010,7 +3009,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() { if (FAILED(hr)) { _TraceCOMError(hr); keepRecording = false; - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; } @@ -3022,7 +3021,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() { if (FAILED(hr)) { _TraceCOMError(hr); keepRecording = false; - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; } @@ -3047,7 +3046,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() { if (FAILED(hr)) { _TraceCOMError(hr); keepRecording = false; - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; } diff --git a/modules/audio_device/win/audio_device_module_win.cc b/modules/audio_device/win/audio_device_module_win.cc index 8cc4b7fc36..6643d8479e 100644 --- a/modules/audio_device/win/audio_device_module_win.cc +++ b/modules/audio_device/win/audio_device_module_win.cc @@ -95,12 +95,12 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { task_queue_factory_(task_queue_factory) { RTC_CHECK(input_); RTC_CHECK(output_); - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); } ~WindowsAudioDeviceModule() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); Terminate(); } @@ -110,7 +110,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t ActiveAudioLayer( AudioDeviceModule::AudioLayer* audioLayer) const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); // TODO(henrika): it might be possible to remove this unique signature. 
*audioLayer = AudioDeviceModule::kWindowsCoreAudio2; @@ -118,14 +118,14 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int32_t RegisterAudioCallback(AudioTransport* audioCallback) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(audio_device_buffer_); RTC_DCHECK_RUN_ON(&thread_checker_); return audio_device_buffer_->RegisterAudioCallback(audioCallback); } int32_t Init() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); RETURN_IF_INPUT_RESTARTS(0); @@ -153,7 +153,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int32_t Terminate() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); RETURN_IF_INPUT_RESTARTS(0); @@ -172,14 +172,14 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int16_t PlayoutDevices() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); return output_->NumDevices(); } int16_t RecordingDevices() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(0); return input_->NumDevices(); @@ -188,7 +188,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t PlayoutDeviceName(uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); std::string name_str, guid_str; @@ -205,7 +205,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t RecordingDeviceName(uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(0); std::string name_str, guid_str; @@ -221,7 +221,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int32_t SetPlayoutDevice(uint16_t index) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); return output_->SetDevice(index); @@ -229,33 +229,33 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t SetPlayoutDevice( AudioDeviceModule::WindowsDeviceType device) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); return output_->SetDevice(device); } int32_t SetRecordingDevice(uint16_t index) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return input_->SetDevice(index); } int32_t SetRecordingDevice( AudioDeviceModule::WindowsDeviceType device) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return input_->SetDevice(device); } int32_t PlayoutIsAvailable(bool* available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = true; return 0; } int32_t InitPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; 
RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); RETURN_IF_OUTPUT_IS_INITIALIZED(0); @@ -263,21 +263,21 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } bool PlayoutIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(true); return output_->PlayoutIsInitialized(); } int32_t RecordingIsAvailable(bool* available) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = true; return 0; } int32_t InitRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(0); RETURN_IF_INPUT_IS_INITIALIZED(0); @@ -285,14 +285,14 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } bool RecordingIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(true); return input_->RecordingIsInitialized(); } int32_t StartPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); RETURN_IF_OUTPUT_IS_ACTIVE(0); @@ -300,21 +300,21 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int32_t StopPlayout() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(-1); return output_->StopPlayout(); } bool Playing() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(true); return output_->Playing(); } int32_t StartRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(0); RETURN_IF_INPUT_IS_ACTIVE(0); @@ -322,41 +322,41 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { } int32_t StopRecording() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_INPUT_RESTARTS(-1); return input_->StopRecording(); } bool Recording() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RETURN_IF_INPUT_RESTARTS(true); return input_->Recording(); } int32_t InitSpeaker() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DLOG(LS_WARNING) << "This method has no effect"; return initialized_ ? 0 : -1; } bool SpeakerIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DLOG(LS_WARNING) << "This method has no effect"; return initialized_; } int32_t InitMicrophone() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DLOG(LS_WARNING) << "This method has no effect"; return initialized_ ? 
0 : -1; } bool MicrophoneIsInitialized() const override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DLOG(LS_WARNING) << "This method has no effect"; return initialized_; @@ -364,7 +364,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t SpeakerVolumeIsAvailable(bool* available) override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = false; return 0; @@ -377,7 +377,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t MicrophoneVolumeIsAvailable(bool* available) override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = false; return 0; @@ -398,7 +398,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t StereoPlayoutIsAvailable(bool* available) const override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = true; return 0; @@ -406,14 +406,14 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t SetStereoPlayout(bool enable) override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return 0; } int32_t StereoPlayout(bool* enabled) const override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *enabled = true; return 0; @@ -421,7 +421,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t StereoRecordingIsAvailable(bool* available) const override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *available = true; return 0; @@ -429,14 +429,14 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t SetStereoRecording(bool enable) override { // TODO(henrika): improve support. - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return 0; } int32_t StereoRecording(bool* enabled) const override { // TODO(henrika): improve support. 
- RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); *enabled = true; return 0; @@ -453,33 +453,33 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { int32_t EnableBuiltInNS(bool enable) override { return 0; } int32_t AttachAudioBuffer() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; output_->AttachAudioBuffer(audio_device_buffer_.get()); input_->AttachAudioBuffer(audio_device_buffer_.get()); return 0; } int RestartPlayoutInternally() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); RETURN_IF_OUTPUT_RESTARTS(0); return output_->RestartPlayout(); } int RestartRecordingInternally() override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return input_->RestartRecording(); } int SetPlayoutSampleRate(uint32_t sample_rate) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return output_->SetSampleRate(sample_rate); } int SetRecordingSampleRate(uint32_t sample_rate) override { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return input_->SetSampleRate(sample_rate); } @@ -499,7 +499,7 @@ class WindowsAudioDeviceModule : public AudioDeviceModuleForTest { // The AudioDeviceBuffer (ADB) instance is needed for sending/receiving audio // to/from the WebRTC layer. Created and owned by this object. Used by - // both |input_| and |output_| but they use orthogonal parts of the ADB. + // both `input_` and `output_` but they use orthogonal parts of the ADB. std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_; // Set to true after a successful call to Init(). Cleared by Terminate(). @@ -513,7 +513,7 @@ CreateWindowsCoreAudioAudioDeviceModuleFromInputAndOutput( std::unique_ptr<AudioInput> audio_input, std::unique_ptr<AudioOutput> audio_output, TaskQueueFactory* task_queue_factory) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; return rtc::make_ref_counted<WindowsAudioDeviceModule>( std::move(audio_input), std::move(audio_output), task_queue_factory); } diff --git a/modules/audio_device/win/core_audio_base_win.cc b/modules/audio_device/win/core_audio_base_win.cc index 7d93fcb14a..f43c068fb2 100644 --- a/modules/audio_device/win/core_audio_base_win.cc +++ b/modules/audio_device/win/core_audio_base_win.cc @@ -35,7 +35,7 @@ namespace { // TODO(henrika): more research is needed before we can enable low-latency. const bool kEnableLowLatencyIfSupported = false; -// Each unit of reference time is 100 nanoseconds, hence |kReftimesPerSec| +// Each unit of reference time is 100 nanoseconds, hence `kReftimesPerSec` // corresponds to one second. // TODO(henrika): possibly add usage in Init(). // const REFERENCE_TIME kReferenceTimesPerSecond = 10000000; @@ -125,7 +125,7 @@ const char* SessionDisconnectReasonToString( bool IsLowLatencySupported(IAudioClient3* client3, const WAVEFORMATEXTENSIBLE* format, uint32_t* min_period_in_frames) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // Get the range of periodicities supported by the engine for the specified // stream format. @@ -143,7 +143,7 @@ bool IsLowLatencySupported(IAudioClient3* client3, // default engine period. // TODO(henrika): verify that this assumption is correct.
const bool low_latency = min_period < default_period; - RTC_LOG(INFO) << "low_latency: " << low_latency; + RTC_LOG(LS_INFO) << "low_latency: " << low_latency; *min_period_in_frames = low_latency ? min_period : 0; return low_latency; } @@ -161,9 +161,10 @@ CoreAudioBase::CoreAudioBase(Direction direction, on_error_callback_(error_callback), device_index_(kUndefined), is_restarting_(false) { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction) << "]"; - RTC_DLOG(INFO) << "Automatic restart: " << automatic_restart; - RTC_DLOG(INFO) << "Windows version: " << rtc::rtc_win::GetVersion(); + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction) + << "]"; + RTC_DLOG(LS_INFO) << "Automatic restart: " << automatic_restart; + RTC_DLOG(LS_INFO) << "Windows version: " << rtc::rtc_win::GetVersion(); // Create the event which the audio engine will signal each time a buffer // becomes ready to be processed by the client. @@ -181,7 +182,7 @@ CoreAudioBase::CoreAudioBase(Direction direction, } CoreAudioBase::~CoreAudioBase() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_EQ(ref_count_, 1); } @@ -207,7 +208,7 @@ int CoreAudioBase::NumberOfEnumeratedDevices() const { } void CoreAudioBase::ReleaseCOMObjects() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; // ComPtr::Reset() sets the ComPtr to nullptr releasing any previous // reference. if (audio_client_) { @@ -230,9 +231,9 @@ bool CoreAudioBase::IsDefaultCommunicationsDevice(int index) const { } bool CoreAudioBase::IsDefaultDeviceId(const std::string& device_id) const { - // Returns true if |device_id| corresponds to the id of the default + // Returns true if `device_id` corresponds to the id of the default // device. Note that, if only one device is available (or if the user has not - // explicitly set a default device), |device_id| will also math + // explicitly set a default device), `device_id` will also match // IsDefaultCommunicationsDeviceId(). return (IsInput() && (device_id == core_audio_utility::GetDefaultInputDeviceID())) || @@ -242,9 +243,9 @@ bool CoreAudioBase::IsDefaultCommunicationsDeviceId( const std::string& device_id) const { - // Returns true if |device_id| corresponds to the id of the default + // Returns true if `device_id` corresponds to the id of the default // communication device. Note that, if only one device is available (or if - // the user has not explicitly set a communication device), |device_id| will + // the user has not explicitly set a communication device), `device_id` will // also match IsDefaultDeviceId().
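A hedged sketch of the id check described above, assuming the `core_audio_utility` helpers from this directory (the return type of `GetDefaultInputDeviceID()` is taken to be `std::string` here):

```cpp
#include <string>

#include "modules/audio_device/win/core_audio_utility_win.h"

namespace webrtc {
namespace webrtc_win {

bool IsDefaultInputDevice(const std::string& device_id) {
  // True when `device_id` names the current default capture endpoint. With a
  // single device (or no user override) the same id may also match the
  // default communications endpoint, as the comment above notes.
  return device_id == core_audio_utility::GetDefaultInputDeviceID();
}

}  // namespace webrtc_win
}  // namespace webrtc
```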
return (IsInput() && (device_id == @@ -288,15 +289,15 @@ std::string CoreAudioBase::GetDeviceID(int index) const { } int CoreAudioBase::SetDevice(int index) { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]: index=" << IndexToString(index); + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]: index=" << IndexToString(index); if (initialized_) { return -1; } std::string device_id = GetDeviceID(index); - RTC_DLOG(INFO) << "index=" << IndexToString(index) - << " => device_id: " << device_id; + RTC_DLOG(LS_INFO) << "index=" << IndexToString(index) + << " => device_id: " << device_id; device_index_ = index; device_id_ = device_id; @@ -306,8 +307,8 @@ int CoreAudioBase::SetDevice(int index) { int CoreAudioBase::DeviceName(int index, std::string* name, std::string* guid) const { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]: index=" << IndexToString(index); + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]: index=" << IndexToString(index); if (index > NumberOfEnumeratedDevices() - 1) { RTC_LOG(LS_ERROR) << "Invalid device index"; return -1; @@ -324,26 +325,26 @@ int CoreAudioBase::DeviceName(int index, } *name = device_names[index].device_name; - RTC_DLOG(INFO) << "name: " << *name; + RTC_DLOG(LS_INFO) << "name: " << *name; if (guid != nullptr) { *guid = device_names[index].unique_id; - RTC_DLOG(INFO) << "guid: " << *guid; + RTC_DLOG(LS_INFO) << "guid: " << *guid; } return 0; } bool CoreAudioBase::Init() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; RTC_DCHECK_GE(device_index_, 0); RTC_DCHECK(!device_id_.empty()); RTC_DCHECK(audio_device_buffer_); RTC_DCHECK(!audio_client_); RTC_DCHECK(!audio_session_control_.Get()); - // Use an existing combination of |device_index_| and |device_id_| to set + // Use an existing combination of `device_index_` and `device_id_` to set // parameters which are required to create an audio client. It is up to the - // parent class to set |device_index_| and |device_id_|. + // parent class to set `device_index_` and `device_id_`. std::string device_id = AudioDeviceName::kDefaultDeviceId; ERole role = ERole(); if (IsDefaultDevice(device_index_)) { @@ -360,15 +361,15 @@ bool CoreAudioBase::Init() { // an audio stream between an audio application and the audio engine. ComPtr audio_client; if (core_audio_utility::GetAudioClientVersion() == 3) { - RTC_DLOG(INFO) << "Using IAudioClient3"; + RTC_DLOG(LS_INFO) << "Using IAudioClient3"; audio_client = core_audio_utility::CreateClient3(device_id, GetDataFlow(), role); } else if (core_audio_utility::GetAudioClientVersion() == 2) { - RTC_DLOG(INFO) << "Using IAudioClient2"; + RTC_DLOG(LS_INFO) << "Using IAudioClient2"; audio_client = core_audio_utility::CreateClient2(device_id, GetDataFlow(), role); } else { - RTC_DLOG(INFO) << "Using IAudioClient"; + RTC_DLOG(LS_INFO) << "Using IAudioClient"; audio_client = core_audio_utility::CreateClient(device_id, GetDataFlow(), role); } @@ -400,7 +401,7 @@ bool CoreAudioBase::Init() { return false; } - // Define the output WAVEFORMATEXTENSIBLE format in |format_|. + // Define the output WAVEFORMATEXTENSIBLE format in `format_`. 
WAVEFORMATEX* format = &format_.Format; format->wFormatTag = WAVE_FORMAT_EXTENSIBLE; // Check the preferred channel configuration and request implicit channel @@ -429,7 +430,7 @@ bool CoreAudioBase::Init() { format_.dwChannelMask = format->nChannels == 1 ? KSAUDIO_SPEAKER_MONO : KSAUDIO_SPEAKER_STEREO; format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; - RTC_DLOG(INFO) << core_audio_utility::WaveFormatToString(&format_); + RTC_DLOG(LS_INFO) << core_audio_utility::WaveFormatToString(&format_); // Verify that the format is supported but exclude the test if the default // sample rate has been overridden. If so, the WASAPI audio engine will do @@ -448,7 +449,7 @@ bool CoreAudioBase::Init() { // - HDAudio driver // - kEnableLowLatencyIfSupported changed from false (default) to true. // TODO(henrika): IsLowLatencySupported() returns AUDCLNT_E_UNSUPPORTED_FORMAT - // when |sample_rate_.has_value()| returns true if rate conversion is + // when `sample_rate_.has_value()` returns true if rate conversion is // actually required (i.e., client asks for other than the default rate). bool low_latency_support = false; uint32_t min_period_in_frames = 0; @@ -475,7 +476,7 @@ bool CoreAudioBase::Init() { // Initialize the audio stream between the client and the device in shared // mode using event-driven buffer handling. Also, using 0 as requested // buffer size results in a default (minimum) endpoint buffer size. - // TODO(henrika): possibly increase |requested_buffer_size| to add + // TODO(henrika): possibly increase `requested_buffer_size` to add // robustness. const REFERENCE_TIME requested_buffer_size = 0; if (FAILED(core_audio_utility::SharedModeInitialize( @@ -502,12 +503,12 @@ bool CoreAudioBase::Init() { 1000.0L; const int preferred_frames_per_buffer = static_cast(params.sample_rate() * device_period_in_seconds + 0.5); - RTC_DLOG(INFO) << "preferred_frames_per_buffer: " - << preferred_frames_per_buffer; + RTC_DLOG(LS_INFO) << "preferred_frames_per_buffer: " + << preferred_frames_per_buffer; if (preferred_frames_per_buffer % params.frames_per_buffer()) { - RTC_LOG(WARNING) << "Buffer size of " << params.frames_per_buffer() - << " is not an even divisor of " - << preferred_frames_per_buffer; + RTC_LOG(LS_WARNING) << "Buffer size of " << params.frames_per_buffer() + << " is not an even divisor of " + << preferred_frames_per_buffer; } // Create an AudioSessionControl interface given the initialized client. 
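For concreteness, the buffer-size arithmetic in the hunk above worked through with assumed values (48 kHz, 10 ms default device period); REFERENCE_TIME counts 100-nanosecond units:

#include <audioclient.h>

const REFERENCE_TIME device_period = 100000;  // assumed: 10 ms in 100-ns units
const double device_period_in_seconds =
    device_period / (10000.0 * 1000.0);       // 0.01 s
const int sample_rate = 48000;                // assumed
const int preferred_frames_per_buffer =
    static_cast<int>(sample_rate * device_period_in_seconds + 0.5);  // 480
// A client using 10 ms buffers (480 frames) divides 480 evenly: no warning.
// A client asking for e.g. 441 frames would trigger the "not an even
// divisor" warning logged above.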
@@ -525,7 +526,7 @@ bool CoreAudioBase::Init() { if (FAILED(audio_session_control->GetState(&state))) { return false; } - RTC_DLOG(INFO) << "audio session state: " << SessionStateToString(state); + RTC_DLOG(LS_INFO) << "audio session state: " << SessionStateToString(state); RTC_DCHECK_EQ(state, AudioSessionStateInactive); // Register the client to receive notifications of session events, including @@ -542,8 +543,8 @@ bool CoreAudioBase::Init() { } bool CoreAudioBase::Start() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; if (IsRestarting()) { // Audio thread should be alive during internal restart since the restart // callback is triggered on that thread and it also makes the restart @@ -559,8 +560,8 @@ bool CoreAudioBase::Start() { audio_thread_ = rtc::PlatformThread::SpawnJoinable( [this] { ThreadRun(); }, name, rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime)); - RTC_DLOG(INFO) << "Started thread with name: " << name - << " and handle: " << *audio_thread_.GetHandle(); + RTC_DLOG(LS_INFO) << "Started thread with name: " << name + << " and handle: " << *audio_thread_.GetHandle(); } // Start streaming data between the endpoint buffer and the audio engine. @@ -579,9 +580,9 @@ bool CoreAudioBase::Start() { } bool CoreAudioBase::Stop() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; - RTC_DLOG(INFO) << "total activity time: " << TimeSinceStart(); + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; + RTC_DLOG(LS_INFO) << "total activity time: " << TimeSinceStart(); // Stop audio streaming. _com_error error = audio_client_->Stop(); @@ -614,8 +615,8 @@ bool CoreAudioBase::Stop() { // Delete the previous registration by the client to receive notifications // about audio session events. - RTC_DLOG(INFO) << "audio session state: " - << SessionStateToString(GetAudioSessionState()); + RTC_DLOG(LS_INFO) << "audio session state: " + << SessionStateToString(GetAudioSessionState()); error = audio_session_control_->UnregisterAudioSessionNotification(this); if (FAILED(error.Error())) { RTC_LOG(LS_ERROR) @@ -663,7 +664,7 @@ bool CoreAudioBase::IsVolumeControlAvailable(bool* available) const { << core_audio_utility::ErrorToString(error); *available = false; } - RTC_DLOG(INFO) << "master volume for output audio session: " << volume; + RTC_DLOG(LS_INFO) << "master volume for output audio session: " << volume; *available = true; return false; @@ -674,8 +675,8 @@ bool CoreAudioBase::IsVolumeControlAvailable(bool* available) const { // device notifications. Hence, the emulated restart sequence covers most parts // of a real sequence expect the actual device switch. 
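Start() in the hunk above hands the streaming loop to a dedicated realtime thread. A condensed sketch of that spawn call, assuming rtc::PlatformThread from rtc_base/platform_thread.h (the SpawnAudioThread() wrapper itself is hypothetical):

#include <functional>

#include "absl/strings/string_view.h"
#include "rtc_base/platform_thread.h"

rtc::PlatformThread SpawnAudioThread(std::function<void()> run,
                                     absl::string_view name) {
  // Joinable thread at realtime priority; it is joined by Finalize() or on
  // destruction, which is what StopThread() relies on.
  return rtc::PlatformThread::SpawnJoinable(
      std::move(run), name,
      rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kRealtime));
}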
bool CoreAudioBase::Restart() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; if (!automatic_restart()) { return false; } @@ -685,12 +686,12 @@ bool CoreAudioBase::Restart() { } void CoreAudioBase::StopThread() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(!IsRestarting()); if (!audio_thread_.empty()) { - RTC_DLOG(INFO) << "Sets stop_event..."; + RTC_DLOG(LS_INFO) << "Sets stop_event..."; SetEvent(stop_event_.Get()); - RTC_DLOG(INFO) << "PlatformThread::Finalize..."; + RTC_DLOG(LS_INFO) << "PlatformThread::Finalize..."; audio_thread_.Finalize(); // Ensure that we don't quit the main thread loop immediately next @@ -701,8 +702,8 @@ void CoreAudioBase::StopThread() { } bool CoreAudioBase::HandleRestartEvent() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; RTC_DCHECK_RUN_ON(&thread_checker_audio_); RTC_DCHECK(!audio_thread_.empty()); RTC_DCHECK(IsRestarting()); @@ -716,13 +717,13 @@ bool CoreAudioBase::HandleRestartEvent() { } bool CoreAudioBase::SwitchDeviceIfNeeded() { - RTC_DLOG(INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) - << "]"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << "[" << DirectionToString(direction()) + << "]"; RTC_DCHECK_RUN_ON(&thread_checker_audio_); RTC_DCHECK(IsRestarting()); - RTC_DLOG(INFO) << "device_index=" << device_index_ - << " => device_id: " << device_id_; + RTC_DLOG(LS_INFO) << "device_index=" << device_index_ + << " => device_id: " << device_id_; // Ensure that at least one device exists and can be utilized. The most // probable cause for ending up here is that a device has been removed. @@ -746,7 +747,7 @@ bool CoreAudioBase::SwitchDeviceIfNeeded() { return false; } } else { - RTC_LOG(INFO) + RTC_LOG(LS_INFO) << "Device configuration has not changed => keeping selected device"; } return true; @@ -766,14 +767,14 @@ AudioSessionState CoreAudioBase::GetAudioSessionState() const { // TODO(henrika): only used for debugging purposes currently. ULONG CoreAudioBase::AddRef() { ULONG new_ref = InterlockedIncrement(&ref_count_); - // RTC_DLOG(INFO) << "__AddRef => " << new_ref; + // RTC_DLOG(LS_INFO) << "__AddRef => " << new_ref; return new_ref; } // TODO(henrika): does not call delete this. ULONG CoreAudioBase::Release() { ULONG new_ref = InterlockedDecrement(&ref_count_); - // RTC_DLOG(INFO) << "__Release => " << new_ref; + // RTC_DLOG(LS_INFO) << "__Release => " << new_ref; return new_ref; } @@ -792,9 +793,9 @@ HRESULT CoreAudioBase::QueryInterface(REFIID iid, void** object) { // IAudioSessionEvents::OnStateChanged. HRESULT CoreAudioBase::OnStateChanged(AudioSessionState new_state) { - RTC_DLOG(INFO) << "___" << __FUNCTION__ << "[" - << DirectionToString(direction()) - << "] new_state: " << SessionStateToString(new_state); + RTC_DLOG(LS_INFO) << "___" << __FUNCTION__ << "[" + << DirectionToString(direction()) + << "] new_state: " << SessionStateToString(new_state); return S_OK; } @@ -806,9 +807,9 @@ HRESULT CoreAudioBase::OnStateChanged(AudioSessionState new_state) { // same event. 
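StopThread() above stops the streaming loop by signalling a Win32 event that the audio thread waits on, joining the thread, and then re-arming the event so a later Start() does not exit immediately. A minimal sketch of that handshake in plain Win32 terms (ordering inferred from the hunk above; error handling omitted):

#include <windows.h>

void StopAudioThread(HANDLE stop_event /* manual-reset event */) {
  ::SetEvent(stop_event);    // ask the wait loop in ThreadRun() to exit
  // ... join the audio thread here (PlatformThread::Finalize) ...
  ::ResetEvent(stop_event);  // re-arm so a later Start() does not quit at once
}

// The event itself would be created once, e.g.:
//   HANDLE stop_event = ::CreateEventW(nullptr, /*bManualReset=*/TRUE,
//                                      /*bInitialState=*/FALSE, nullptr);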
HRESULT CoreAudioBase::OnSessionDisconnected( AudioSessionDisconnectReason disconnect_reason) { - RTC_DLOG(INFO) << "___" << __FUNCTION__ << "[" - << DirectionToString(direction()) << "] reason: " - << SessionDisconnectReasonToString(disconnect_reason); + RTC_DLOG(LS_INFO) << "___" << __FUNCTION__ << "[" + << DirectionToString(direction()) << "] reason: " + << SessionDisconnectReasonToString(disconnect_reason); // Ignore changes in the audio session (don't try to restart) if the user // has explicitly asked for this type of ADM during construction. if (!automatic_restart()) { @@ -869,8 +870,8 @@ void CoreAudioBase::ThreadRun() { RTC_LOG(LS_ERROR) << "MMCSS is not supported"; return; } - RTC_DLOG(INFO) << "[" << DirectionToString(direction()) - << "] ThreadRun starts..."; + RTC_DLOG(LS_INFO) << "[" << DirectionToString(direction()) + << "] ThreadRun starts..."; // TODO(henrika): difference between "Pro Audio" and "Audio"? ScopedMMCSSRegistration mmcss_registration(L"Pro Audio"); ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA); @@ -905,15 +906,15 @@ void CoreAudioBase::ThreadRun() { wait_array, false, INFINITE); switch (wait_result) { case WAIT_OBJECT_0 + 0: - // |stop_event_| has been set. + // `stop_event_` has been set. streaming = false; break; case WAIT_OBJECT_0 + 1: - // |restart_event_| has been set. + // `restart_event_` has been set. error = !HandleRestartEvent(); break; case WAIT_OBJECT_0 + 2: - // |audio_samples_event_| has been set. + // `audio_samples_event_` has been set. error = !on_data_callback_(device_frequency); break; default: @@ -938,8 +939,8 @@ void CoreAudioBase::ThreadRun() { // this stream should be destroyed instead of reused in the future. } - RTC_DLOG(INFO) << "[" << DirectionToString(direction()) - << "] ...ThreadRun stops"; + RTC_DLOG(LS_INFO) << "[" << DirectionToString(direction()) + << "] ...ThreadRun stops"; } } // namespace webrtc_win diff --git a/modules/audio_device/win/core_audio_base_win.h b/modules/audio_device/win/core_audio_base_win.h index afcc6a684d..a9a769e15f 100644 --- a/modules/audio_device/win/core_audio_base_win.h +++ b/modules/audio_device/win/core_audio_base_win.h @@ -63,7 +63,7 @@ class CoreAudioBase : public IAudioSessionEvents { // Callback definition for notifications of run-time error messages. It can // be called e.g. when an active audio device is removed and an audio stream - // is disconnected (|error| is then set to kStreamDisconnected). Both input + // is disconnected (`error` is then set to kStreamDisconnected). Both input // and output clients implements OnErrorCallback() and will trigger an // internal restart sequence for kStreamDisconnected. // This method is currently always called on the audio thread. @@ -103,13 +103,13 @@ class CoreAudioBase : public IAudioSessionEvents { // Releases all allocated COM resources in the base class. void ReleaseCOMObjects(); - // Returns number of active devices given the specified |direction_| set + // Returns number of active devices given the specified `direction_` set // by the parent (input or output). int NumberOfActiveDevices() const; // Returns total number of enumerated audio devices which is the sum of all // active devices plus two extra (one default and one default - // communications). The value in |direction_| determines if capture or + // communications). The value in `direction_` determines if capture or // render devices are counted. 
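The ThreadRun() loop above multiplexes three events with one blocking wait; the offset from WAIT_OBJECT_0 identifies which handle in the array fired. A self-contained sketch, with the handle order assumed to match the cases in the hunk:

#include <windows.h>

void RunEventLoop(HANDLE stop_event, HANDLE restart_event,
                  HANDLE samples_event) {
  HANDLE wait_array[] = {stop_event, restart_event, samples_event};
  bool streaming = true;
  while (streaming) {
    DWORD r = ::WaitForMultipleObjects(ARRAYSIZE(wait_array), wait_array,
                                       /*bWaitAll=*/FALSE, INFINITE);
    switch (r) {
      case WAIT_OBJECT_0 + 0:
        streaming = false;  // stop requested
        break;
      case WAIT_OBJECT_0 + 1:
        // restart_event: rebuild the stream, possibly on a new device.
        break;
      case WAIT_OBJECT_0 + 2:
        // audio_samples_event: render or capture one packet.
        break;
      default:
        streaming = false;  // wait failure: treat as fatal
        break;
    }
  }
}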
int NumberOfEnumeratedDevices() const; diff --git a/modules/audio_device/win/core_audio_input_win.cc b/modules/audio_device/win/core_audio_input_win.cc index 8ea74267df..17790dafc4 100644 --- a/modules/audio_device/win/core_audio_input_win.cc +++ b/modules/audio_device/win/core_audio_input_win.cc @@ -33,24 +33,24 @@ CoreAudioInput::CoreAudioInput(bool automatic_restart) automatic_restart, [this](uint64_t freq) { return OnDataCallback(freq); }, [this](ErrorType err) { return OnErrorCallback(err); }) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); thread_checker_audio_.Detach(); } CoreAudioInput::~CoreAudioInput() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); } int CoreAudioInput::Init() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return 0; } int CoreAudioInput::Terminate() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); StopRecording(); return 0; @@ -62,17 +62,17 @@ int CoreAudioInput::NumDevices() const { } int CoreAudioInput::SetDevice(int index) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << index; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index; RTC_DCHECK_GE(index, 0); RTC_DCHECK_RUN_ON(&thread_checker_); return CoreAudioBase::SetDevice(index); } int CoreAudioInput::SetDevice(AudioDeviceModule::WindowsDeviceType device) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " - << ((device == AudioDeviceModule::kDefaultDevice) - ? "Default" - : "DefaultCommunication"); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " + << ((device == AudioDeviceModule::kDefaultDevice) + ? "Default" + : "DefaultCommunication"); RTC_DCHECK_RUN_ON(&thread_checker_); return SetDevice((device == AudioDeviceModule::kDefaultDevice) ? 0 : 1); } @@ -80,42 +80,42 @@ int CoreAudioInput::SetDevice(AudioDeviceModule::WindowsDeviceType device) { int CoreAudioInput::DeviceName(int index, std::string* name, std::string* guid) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << index; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DCHECK(name); return CoreAudioBase::DeviceName(index, name, guid); } void CoreAudioInput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); audio_device_buffer_ = audio_buffer; } bool CoreAudioInput::RecordingIsInitialized() const { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << initialized_; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << initialized_; RTC_DCHECK_RUN_ON(&thread_checker_); return initialized_; } int CoreAudioInput::InitRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(!initialized_); RTC_DCHECK(!Recording()); RTC_DCHECK(!audio_capture_client_); // Creates an IAudioClient instance and stores the valid interface pointer in - // |audio_client3_|, |audio_client2_|, or |audio_client_| depending on + // `audio_client3_`, `audio_client2_`, or `audio_client_` depending on // platform support. The base class will use optimal input parameters and do // an event driven shared mode initialization. The utilized format will be - // stored in |format_| and can be used for configuration and allocation of + // stored in `format_` and can be used for configuration and allocation of // audio buffers. 
if (!CoreAudioBase::Init()) { return -1; } RTC_DCHECK(audio_client_); - // Configure the recording side of the audio device buffer using |format_| + // Configure the recording side of the audio device buffer using `format_` // after a trivial sanity check of the format structure. RTC_DCHECK(audio_device_buffer_); WAVEFORMATEX* format = &format_.Format; @@ -155,7 +155,7 @@ int CoreAudioInput::InitRecording() { } int CoreAudioInput::StartRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK(!Recording()); RTC_DCHECK(fine_audio_buffer_); RTC_DCHECK(audio_device_buffer_); @@ -179,7 +179,7 @@ int CoreAudioInput::StartRecording() { } int CoreAudioInput::StopRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; if (!initialized_) { return 0; } @@ -187,7 +187,7 @@ int CoreAudioInput::StopRecording() { // Release resources allocated in InitRecording() and then return if this // method is called without any active input audio. if (!Recording()) { - RTC_DLOG(WARNING) << "No input stream is active"; + RTC_DLOG(LS_WARNING) << "No input stream is active"; ReleaseCOMObjects(); initialized_ = false; return 0; @@ -214,7 +214,7 @@ int CoreAudioInput::StopRecording() { } bool CoreAudioInput::Recording() { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << is_active_; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << is_active_; return is_active_; } @@ -222,7 +222,7 @@ bool CoreAudioInput::Recording() { // are not compatible with the old ADM implementation since it allows accessing // the volume control with any active audio output stream. int CoreAudioInput::VolumeIsAvailable(bool* available) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return IsVolumeControlAvailable(available) ? 0 : -1; } @@ -230,7 +230,7 @@ int CoreAudioInput::VolumeIsAvailable(bool* available) { // Triggers the restart sequence. Only used for testing purposes to emulate // a real event where e.g. an active input device is removed. 
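StartRecording() and StopRecording() above ultimately drive the WASAPI stream state machine on the initialized client. Reduced to the raw calls, the lifecycle looks roughly like this (a sketch; error handling trimmed):

#include <audioclient.h>

void RunStreamOnce(IAudioClient* client) {
  client->Start();  // engine starts filling the capture endpoint buffer
  // ... event-driven packet processing on the audio thread ...
  client->Stop();   // streaming stops; queued data stays in the buffer
  client->Reset();  // flushes pending data; only legal while stopped
}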
int CoreAudioInput::RestartRecording() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); if (!Recording()) { return 0; @@ -249,14 +249,14 @@ bool CoreAudioInput::Restarting() const { } int CoreAudioInput::SetSampleRate(uint32_t sample_rate) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); sample_rate_ = sample_rate; return 0; } void CoreAudioInput::ReleaseCOMObjects() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CoreAudioBase::ReleaseCOMObjects(); if (audio_capture_client_.Get()) { audio_capture_client_.Reset(); @@ -273,7 +273,7 @@ bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) { return false; } if (num_data_callbacks_ == 0) { - RTC_LOG(INFO) << "--- Input audio stream is alive ---"; + RTC_LOG(LS_INFO) << "--- Input audio stream is alive ---"; } UINT32 num_frames_in_next_packet = 0; _com_error error = @@ -328,7 +328,7 @@ bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) { } } if (num_data_callbacks_ % 500 == 0) { - RTC_DLOG(INFO) << "latency: " << latency_ms_; + RTC_DLOG(LS_INFO) << "latency: " << latency_ms_; } // The data in the packet is not correlated with the previous packet's @@ -353,7 +353,7 @@ bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) { format_.Format.nBlockAlign * num_frames_to_read); RTC_DLOG(LS_WARNING) << "Captured audio is replaced by silence"; } else { - // Copy recorded audio in |audio_data| to the WebRTC sink using the + // Copy recorded audio in `audio_data` to the WebRTC sink using the // FineAudioBuffer object. fine_audio_buffer_->DeliverRecordedData( rtc::MakeArrayView(reinterpret_cast(audio_data), @@ -382,12 +382,12 @@ bool CoreAudioInput::OnDataCallback(uint64_t device_frequency) { } bool CoreAudioInput::OnErrorCallback(ErrorType error) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << as_integer(error); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << as_integer(error); RTC_DCHECK_RUN_ON(&thread_checker_audio_); if (error == CoreAudioBase::ErrorType::kStreamDisconnected) { HandleStreamDisconnected(); } else { - RTC_DLOG(WARNING) << "Unsupported error type"; + RTC_DLOG(LS_WARNING) << "Unsupported error type"; } return true; } @@ -397,13 +397,13 @@ absl::optional CoreAudioInput::EstimateLatencyMillis( if (!qpc_to_100ns_) { return absl::nullopt; } - // Input parameter |capture_time_100ns| contains the performance counter at + // Input parameter `capture_time_100ns` contains the performance counter at // the time that the audio endpoint device recorded the device position of // the first audio frame in the data packet converted into 100ns units. // We derive a delay estimate by: // - sampling the current performance counter (qpc_now_raw), // - converting it into 100ns time units (now_time_100ns), and - // - subtracting |capture_time_100ns| from now_time_100ns. + // - subtracting `capture_time_100ns` from now_time_100ns. LARGE_INTEGER perf_counter_now = {}; if (!::QueryPerformanceCounter(&perf_counter_now)) { return absl::nullopt; @@ -426,7 +426,7 @@ absl::optional CoreAudioInput::EstimateLatencyMillis( // safe. // TODO(henrika): add more details. 
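OnDataCallback() above drains the capture endpoint one packet at a time, and EstimateLatencyMillis() compares each packet's QPC capture timestamp (in 100-ns units) against the current performance counter. A sketch of the drain loop, assuming an IAudioCaptureClient obtained through IAudioClient::GetService():

#include <audioclient.h>

void DrainCapturePackets(IAudioCaptureClient* capture_client) {
  UINT32 frames_in_next_packet = 0;
  capture_client->GetNextPacketSize(&frames_in_next_packet);
  while (frames_in_next_packet > 0) {
    BYTE* audio_data = nullptr;
    UINT32 num_frames = 0;
    DWORD flags = 0;
    UINT64 device_position = 0;
    UINT64 qpc_position = 0;  // capture time in 100-ns units
    capture_client->GetBuffer(&audio_data, &num_frames, &flags,
                              &device_position, &qpc_position);
    if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
      // Deliver silence instead of reading `audio_data`.
    }
    // ... hand `num_frames` frames to the sink ...
    capture_client->ReleaseBuffer(num_frames);
    capture_client->GetNextPacketSize(&frames_in_next_packet);
  }
}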
bool CoreAudioInput::HandleStreamDisconnected() { - RTC_DLOG(INFO) << "<<<--- " << __FUNCTION__; + RTC_DLOG(LS_INFO) << "<<<--- " << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_audio_); RTC_DCHECK(automatic_restart()); @@ -445,7 +445,7 @@ bool CoreAudioInput::HandleStreamDisconnected() { return false; } - RTC_DLOG(INFO) << __FUNCTION__ << " --->>>"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << " --->>>"; return true; } diff --git a/modules/audio_device/win/core_audio_output_win.cc b/modules/audio_device/win/core_audio_output_win.cc index 36ec703c3a..c92fedf0e9 100644 --- a/modules/audio_device/win/core_audio_output_win.cc +++ b/modules/audio_device/win/core_audio_output_win.cc @@ -29,25 +29,25 @@ CoreAudioOutput::CoreAudioOutput(bool automatic_restart) automatic_restart, [this](uint64_t freq) { return OnDataCallback(freq); }, [this](ErrorType err) { return OnErrorCallback(err); }) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); thread_checker_audio_.Detach(); } CoreAudioOutput::~CoreAudioOutput() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); Terminate(); } int CoreAudioOutput::Init() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return 0; } int CoreAudioOutput::Terminate() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); StopPlayout(); return 0; @@ -59,17 +59,17 @@ int CoreAudioOutput::NumDevices() const { } int CoreAudioOutput::SetDevice(int index) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << index; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index; RTC_DCHECK_GE(index, 0); RTC_DCHECK_RUN_ON(&thread_checker_); return CoreAudioBase::SetDevice(index); } int CoreAudioOutput::SetDevice(AudioDeviceModule::WindowsDeviceType device) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " - << ((device == AudioDeviceModule::kDefaultDevice) - ? "Default" - : "DefaultCommunication"); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " + << ((device == AudioDeviceModule::kDefaultDevice) + ? "Default" + : "DefaultCommunication"); RTC_DCHECK_RUN_ON(&thread_checker_); return SetDevice((device == AudioDeviceModule::kDefaultDevice) ? 
0 : 1); } @@ -77,42 +77,42 @@ int CoreAudioOutput::SetDevice(AudioDeviceModule::WindowsDeviceType device) { int CoreAudioOutput::DeviceName(int index, std::string* name, std::string* guid) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << index; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << index; RTC_DCHECK_RUN_ON(&thread_checker_); RTC_DCHECK(name); return CoreAudioBase::DeviceName(index, name, guid); } void CoreAudioOutput::AttachAudioBuffer(AudioDeviceBuffer* audio_buffer) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); audio_device_buffer_ = audio_buffer; } bool CoreAudioOutput::PlayoutIsInitialized() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return initialized_; } int CoreAudioOutput::InitPlayout() { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << IsRestarting(); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting(); RTC_DCHECK(!initialized_); RTC_DCHECK(!Playing()); RTC_DCHECK(!audio_render_client_); // Creates an IAudioClient instance and stores the valid interface pointer in - // |audio_client3_|, |audio_client2_|, or |audio_client_| depending on + // `audio_client3_`, `audio_client2_`, or `audio_client_` depending on // platform support. The base class will use optimal output parameters and do // an event driven shared mode initialization. The utilized format will be - // stored in |format_| and can be used for configuration and allocation of + // stored in `format_` and can be used for configuration and allocation of // audio buffers. if (!CoreAudioBase::Init()) { return -1; } RTC_DCHECK(audio_client_); - // Configure the playout side of the audio device buffer using |format_| + // Configure the playout side of the audio device buffer using `format_` // after a trivial sanity check of the format structure. RTC_DCHECK(audio_device_buffer_); WAVEFORMATEX* format = &format_.Format; @@ -150,7 +150,7 @@ int CoreAudioOutput::InitPlayout() { } int CoreAudioOutput::StartPlayout() { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << IsRestarting(); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting(); RTC_DCHECK(!Playing()); RTC_DCHECK(fine_audio_buffer_); RTC_DCHECK(audio_device_buffer_); @@ -180,7 +180,7 @@ int CoreAudioOutput::StartPlayout() { } int CoreAudioOutput::StopPlayout() { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << IsRestarting(); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << IsRestarting(); if (!initialized_) { return 0; } @@ -188,7 +188,7 @@ int CoreAudioOutput::StopPlayout() { // Release resources allocated in InitPlayout() and then return if this // method is called without any active output audio. if (!Playing()) { - RTC_DLOG(WARNING) << "No output stream is active"; + RTC_DLOG(LS_WARNING) << "No output stream is active"; ReleaseCOMObjects(); initialized_ = false; return 0; @@ -214,7 +214,7 @@ int CoreAudioOutput::StopPlayout() { } bool CoreAudioOutput::Playing() { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << is_active_; + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << is_active_; return is_active_; } @@ -222,7 +222,7 @@ bool CoreAudioOutput::Playing() { // are not compatible with the old ADM implementation since it allows accessing // the volume control with any active audio output stream. int CoreAudioOutput::VolumeIsAvailable(bool* available) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return IsVolumeControlAvailable(available) ? 
0 : -1; } @@ -230,7 +230,7 @@ int CoreAudioOutput::VolumeIsAvailable(bool* available) { // Triggers the restart sequence. Only used for testing purposes to emulate // a real event where e.g. an active output device is removed. int CoreAudioOutput::RestartPlayout() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); if (!Playing()) { return 0; @@ -243,20 +243,20 @@ int CoreAudioOutput::RestartPlayout() { } bool CoreAudioOutput::Restarting() const { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); return IsRestarting(); } int CoreAudioOutput::SetSampleRate(uint32_t sample_rate) { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_); sample_rate_ = sample_rate; return 0; } void CoreAudioOutput::ReleaseCOMObjects() { - RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(LS_INFO) << __FUNCTION__; CoreAudioBase::ReleaseCOMObjects(); if (audio_render_client_.Get()) { audio_render_client_.Reset(); @@ -264,7 +264,7 @@ void CoreAudioOutput::ReleaseCOMObjects() { } bool CoreAudioOutput::OnErrorCallback(ErrorType error) { - RTC_DLOG(INFO) << __FUNCTION__ << ": " << as_integer(error); + RTC_DLOG(LS_INFO) << __FUNCTION__ << ": " << as_integer(error); RTC_DCHECK_RUN_ON(&thread_checker_audio_); if (!initialized_ || !Playing()) { return true; @@ -273,7 +273,7 @@ bool CoreAudioOutput::OnErrorCallback(ErrorType error) { if (error == CoreAudioBase::ErrorType::kStreamDisconnected) { HandleStreamDisconnected(); } else { - RTC_DLOG(WARNING) << "Unsupported error type"; + RTC_DLOG(LS_WARNING) << "Unsupported error type"; } return true; } @@ -281,7 +281,7 @@ bool CoreAudioOutput::OnErrorCallback(ErrorType error) { bool CoreAudioOutput::OnDataCallback(uint64_t device_frequency) { RTC_DCHECK_RUN_ON(&thread_checker_audio_); if (num_data_callbacks_ == 0) { - RTC_LOG(INFO) << "--- Output audio stream is alive ---"; + RTC_LOG(LS_INFO) << "--- Output audio stream is alive ---"; } // Get the padding value which indicates the amount of valid unread data that // the endpoint buffer currently contains. @@ -329,12 +329,12 @@ bool CoreAudioOutput::OnDataCallback(uint64_t device_frequency) { // TODO(henrika): note that FineAudioBuffer adds latency as well. latency_ms_ = EstimateOutputLatencyMillis(device_frequency); if (num_data_callbacks_ % 500 == 0) { - RTC_DLOG(INFO) << "latency: " << latency_ms_; + RTC_DLOG(LS_INFO) << "latency: " << latency_ms_; } } // Get audio data from WebRTC and write it to the allocated buffer in - // |audio_data|. The playout latency is not updated for each callback. + // `audio_data`. The playout latency is not updated for each callback. fine_audio_buffer_->GetPlayoutData( rtc::MakeArrayView(reinterpret_cast(audio_data), num_requested_frames * format_.Format.nChannels), @@ -360,7 +360,7 @@ int CoreAudioOutput::EstimateOutputLatencyMillis(uint64_t device_frequency) { UINT64 position = 0; UINT64 qpc_position = 0; int delay_ms = 0; - // Get the device position through output parameter |position|. This is the + // Get the device position through output parameter `position`. This is the // stream position of the sample that is currently playing through the // speakers. _com_error error = audio_clock_->GetPosition(&position, &qpc_position); @@ -394,7 +394,7 @@ int CoreAudioOutput::EstimateOutputLatencyMillis(uint64_t device_frequency) { // safe. // TODO(henrika): add more details. 
bool CoreAudioOutput::HandleStreamDisconnected() { - RTC_DLOG(INFO) << "<<<--- " << __FUNCTION__; + RTC_DLOG(LS_INFO) << "<<<--- " << __FUNCTION__; RTC_DCHECK_RUN_ON(&thread_checker_audio_); RTC_DCHECK(automatic_restart()); @@ -413,7 +413,7 @@ bool CoreAudioOutput::HandleStreamDisconnected() { return false; } - RTC_DLOG(INFO) << __FUNCTION__ << " --->>>"; + RTC_DLOG(LS_INFO) << __FUNCTION__ << " --->>>"; return true; } diff --git a/modules/audio_device/win/core_audio_utility_win.cc b/modules/audio_device/win/core_audio_utility_win.cc index 289abe9d26..c5a3520868 100644 --- a/modules/audio_device/win/core_audio_utility_win.cc +++ b/modules/audio_device/win/core_audio_utility_win.cc @@ -38,9 +38,9 @@ using core_audio_utility::ErrorToString; // Converts from channel mask to list of included channels. // Each audio data format contains channels for one or more of the positions // listed below. The number of channels simply equals the number of nonzero -// flag bits in the |channel_mask|. The relative positions of the channels +// flag bits in the `channel_mask`. The relative positions of the channels // within each block of audio data always follow the same relative ordering -// as the flag bits in the table below. For example, if |channel_mask| contains +// as the flag bits in the table below. For example, if `channel_mask` contains // the value 0x00000033, the format defines four audio channels that are // assigned for playback to the front-left, front-right, back-left, // and back-right speakers, respectively. The channel data should be interleaved @@ -205,7 +205,7 @@ bool LoadAudiosesDll() { L"%WINDIR%\\system32\\audioses.dll"; wchar_t path[MAX_PATH] = {0}; ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path)); - RTC_DLOG(INFO) << rtc::ToUtf8(path); + RTC_DLOG(LS_INFO) << rtc::ToUtf8(path); return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) != nullptr); } @@ -214,7 +214,7 @@ bool LoadAvrtDll() { static const wchar_t* const kAvrtDLL = L"%WINDIR%\\system32\\Avrt.dll"; wchar_t path[MAX_PATH] = {0}; ExpandEnvironmentStringsW(kAvrtDLL, path, arraysize(path)); - RTC_DLOG(INFO) << rtc::ToUtf8(path); + RTC_DLOG(LS_INFO) << rtc::ToUtf8(path); return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) != nullptr); } @@ -278,15 +278,15 @@ bool IsDeviceActive(IMMDevice* device) { return SUCCEEDED(device->GetState(&state)) && (state & DEVICE_STATE_ACTIVE); } -// Retrieve an audio device specified by |device_id| or a default device -// specified by data-flow direction and role if |device_id| is default. +// Retrieve an audio device specified by `device_id` or a default device +// specified by data-flow direction and role if `device_id` is default. ComPtr CreateDeviceInternal(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "CreateDeviceInternal: " - "id=" - << device_id << ", flow=" << FlowToString(data_flow) - << ", role=" << RoleToString(role); + RTC_DLOG(LS_INFO) << "CreateDeviceInternal: " + "id=" + << device_id << ", flow=" << FlowToString(data_flow) + << ", role=" << RoleToString(role); ComPtr audio_endpoint_device; // Create the IMMDeviceEnumerator interface. @@ -500,7 +500,7 @@ bool GetDeviceNamesInternal(EDataFlow data_flow, } // Loop over all active devices and add friendly name and unique id to the - // |device_names| queue. For now, devices are added at indexes 0, 1, ..., N-1 + // `device_names` queue. 
For now, devices are added at indexes 0, 1, ..., N-1 // but they will be moved to 2,3,..., N+1 at the next stage when default and // default communication devices are added at index 0 and 1. ComPtr audio_device; @@ -587,10 +587,10 @@ bool GetDeviceNamesInternal(EDataFlow data_flow, // [2] friendly name: Headset Microphone (2- Arctis 7 Chat) // [2] unique id : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c} for (size_t i = 0; i < device_names->size(); ++i) { - RTC_DLOG(INFO) << "[" << i - << "] friendly name: " << (*device_names)[i].device_name; - RTC_DLOG(INFO) << "[" << i - << "] unique id : " << (*device_names)[i].unique_id; + RTC_DLOG(LS_INFO) << "[" << i + << "] friendly name: " << (*device_names)[i].device_name; + RTC_DLOG(LS_INFO) << "[" << i + << "] unique id : " << (*device_names)[i].unique_id; } return true; @@ -611,11 +611,11 @@ HRESULT GetPreferredAudioParametersInternal(IAudioClient* client, return hr; int sample_rate = mix_format.Format.nSamplesPerSec; - // Override default sample rate if |fixed_sample_rate| is set and different + // Override default sample rate if `fixed_sample_rate` is set and different // from the default rate. if (fixed_sample_rate > 0 && fixed_sample_rate != sample_rate) { - RTC_DLOG(INFO) << "Using fixed sample rate instead of the preferred: " - << sample_rate << " is replaced by " << fixed_sample_rate; + RTC_DLOG(LS_INFO) << "Using fixed sample rate instead of the preferred: " + << sample_rate << " is replaced by " << fixed_sample_rate; sample_rate = fixed_sample_rate; } // TODO(henrika): utilize full mix_format.Format.wBitsPerSample. @@ -634,7 +634,7 @@ HRESULT GetPreferredAudioParametersInternal(IAudioClient* client, AudioParameters audio_params(sample_rate, channels, frames_per_buffer); *params = audio_params; - RTC_DLOG(INFO) << audio_params.ToString(); + RTC_DLOG(LS_INFO) << audio_params.ToString(); return hr; } @@ -669,13 +669,13 @@ size_t WaveFormatWrapper::size() const { } bool IsSupported() { - RTC_DLOG(INFO) << "IsSupported"; + RTC_DLOG(LS_INFO) << "IsSupported"; static bool g_is_supported = IsSupportedInternal(); return g_is_supported; } bool IsMMCSSSupported() { - RTC_DLOG(INFO) << "IsMMCSSSupported"; + RTC_DLOG(LS_INFO) << "IsMMCSSSupported"; return LoadAvrtDll(); } @@ -698,7 +698,7 @@ int NumberOfActiveDevices(EDataFlow data_flow) { } else if (data_flow == eAll) { str = "Total number of devices: "; } - RTC_DLOG(INFO) << str << number_of_active_devices; + RTC_DLOG(LS_INFO) << str << number_of_active_devices; return static_cast(number_of_active_devices); } @@ -713,33 +713,33 @@ uint32_t GetAudioClientVersion() { } ComPtr CreateDeviceEnumerator() { - RTC_DLOG(INFO) << "CreateDeviceEnumerator"; + RTC_DLOG(LS_INFO) << "CreateDeviceEnumerator"; return CreateDeviceEnumeratorInternal(true); } std::string GetDefaultInputDeviceID() { - RTC_DLOG(INFO) << "GetDefaultInputDeviceID"; + RTC_DLOG(LS_INFO) << "GetDefaultInputDeviceID"; ComPtr device( CreateDevice(AudioDeviceName::kDefaultDeviceId, eCapture, eConsole)); return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string(); } std::string GetDefaultOutputDeviceID() { - RTC_DLOG(INFO) << "GetDefaultOutputDeviceID"; + RTC_DLOG(LS_INFO) << "GetDefaultOutputDeviceID"; ComPtr device( CreateDevice(AudioDeviceName::kDefaultDeviceId, eRender, eConsole)); return device.Get() ? 
GetDeviceIdInternal(device.Get()) : std::string(); } std::string GetCommunicationsInputDeviceID() { - RTC_DLOG(INFO) << "GetCommunicationsInputDeviceID"; + RTC_DLOG(LS_INFO) << "GetCommunicationsInputDeviceID"; ComPtr device(CreateDevice(AudioDeviceName::kDefaultDeviceId, eCapture, eCommunications)); return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string(); } std::string GetCommunicationsOutputDeviceID() { - RTC_DLOG(INFO) << "GetCommunicationsOutputDeviceID"; + RTC_DLOG(LS_INFO) << "GetCommunicationsOutputDeviceID"; ComPtr device(CreateDevice(AudioDeviceName::kDefaultDeviceId, eRender, eCommunications)); return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string(); @@ -748,24 +748,24 @@ std::string GetCommunicationsOutputDeviceID() { ComPtr CreateDevice(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "CreateDevice"; + RTC_DLOG(LS_INFO) << "CreateDevice"; return CreateDeviceInternal(device_id, data_flow, role); } AudioDeviceName GetDeviceName(IMMDevice* device) { - RTC_DLOG(INFO) << "GetDeviceName"; + RTC_DLOG(LS_INFO) << "GetDeviceName"; RTC_DCHECK(device); AudioDeviceName device_name(GetDeviceFriendlyNameInternal(device), GetDeviceIdInternal(device)); - RTC_DLOG(INFO) << "friendly name: " << device_name.device_name; - RTC_DLOG(INFO) << "unique id : " << device_name.unique_id; + RTC_DLOG(LS_INFO) << "friendly name: " << device_name.device_name; + RTC_DLOG(LS_INFO) << "unique id : " << device_name.unique_id; return device_name; } std::string GetFriendlyName(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "GetFriendlyName"; + RTC_DLOG(LS_INFO) << "GetFriendlyName"; ComPtr audio_device = CreateDevice(device_id, data_flow, role); if (!audio_device.Get()) return std::string(); @@ -775,7 +775,7 @@ std::string GetFriendlyName(const std::string& device_id, } EDataFlow GetDataFlow(IMMDevice* device) { - RTC_DLOG(INFO) << "GetDataFlow"; + RTC_DLOG(LS_INFO) << "GetDataFlow"; RTC_DCHECK(device); ComPtr endpoint; _com_error error = device->QueryInterface(endpoint.GetAddressOf()); @@ -796,32 +796,32 @@ EDataFlow GetDataFlow(IMMDevice* device) { } bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names) { - RTC_DLOG(INFO) << "GetInputDeviceNames"; + RTC_DLOG(LS_INFO) << "GetInputDeviceNames"; RTC_DCHECK(device_names); RTC_DCHECK(device_names->empty()); return GetDeviceNamesInternal(eCapture, device_names); } bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names) { - RTC_DLOG(INFO) << "GetOutputDeviceNames"; + RTC_DLOG(LS_INFO) << "GetOutputDeviceNames"; RTC_DCHECK(device_names); RTC_DCHECK(device_names->empty()); return GetDeviceNamesInternal(eRender, device_names); } ComPtr CreateSessionManager2(IMMDevice* device) { - RTC_DLOG(INFO) << "CreateSessionManager2"; + RTC_DLOG(LS_INFO) << "CreateSessionManager2"; return CreateSessionManager2Internal(device); } Microsoft::WRL::ComPtr CreateSessionEnumerator( IMMDevice* device) { - RTC_DLOG(INFO) << "CreateSessionEnumerator"; + RTC_DLOG(LS_INFO) << "CreateSessionEnumerator"; return CreateSessionEnumeratorInternal(device); } int NumberOfActiveSessions(IMMDevice* device) { - RTC_DLOG(INFO) << "NumberOfActiveSessions"; + RTC_DLOG(LS_INFO) << "NumberOfActiveSessions"; ComPtr session_enumerator = CreateSessionEnumerator(device); @@ -833,7 +833,7 @@ int NumberOfActiveSessions(IMMDevice* device) { << ErrorToString(error); return 0; } - RTC_DLOG(INFO) << "Total number of audio sessions: " << session_count; + RTC_DLOG(LS_INFO) << "Total number of audio 
sessions: " << session_count; int num_active = 0; for (int session = 0; session < session_count; session++) { @@ -849,8 +849,8 @@ int NumberOfActiveSessions(IMMDevice* device) { // Log the display name of the audio session for debugging purposes. LPWSTR display_name; if (SUCCEEDED(session_control->GetDisplayName(&display_name))) { - RTC_DLOG(INFO) << "display name: " - << rtc::ToUtf8(display_name, wcslen(display_name)); + RTC_DLOG(LS_INFO) << "display name: " + << rtc::ToUtf8(display_name, wcslen(display_name)); CoTaskMemFree(display_name); } @@ -867,14 +867,14 @@ int NumberOfActiveSessions(IMMDevice* device) { } } - RTC_DLOG(INFO) << "Number of active audio sessions: " << num_active; + RTC_DLOG(LS_INFO) << "Number of active audio sessions: " << num_active; return num_active; } ComPtr CreateClient(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "CreateClient"; + RTC_DLOG(LS_INFO) << "CreateClient"; ComPtr device(CreateDevice(device_id, data_flow, role)); return CreateClientInternal(device.Get()); } @@ -882,7 +882,7 @@ ComPtr CreateClient(const std::string& device_id, ComPtr CreateClient2(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "CreateClient2"; + RTC_DLOG(LS_INFO) << "CreateClient2"; ComPtr device(CreateDevice(device_id, data_flow, role)); return CreateClient2Internal(device.Get()); } @@ -890,13 +890,13 @@ ComPtr CreateClient2(const std::string& device_id, ComPtr CreateClient3(const std::string& device_id, EDataFlow data_flow, ERole role) { - RTC_DLOG(INFO) << "CreateClient3"; + RTC_DLOG(LS_INFO) << "CreateClient3"; ComPtr device(CreateDevice(device_id, data_flow, role)); return CreateClient3Internal(device.Get()); } HRESULT SetClientProperties(IAudioClient2* client) { - RTC_DLOG(INFO) << "SetClientProperties"; + RTC_DLOG(LS_INFO) << "SetClientProperties"; RTC_DCHECK(client); if (GetAudioClientVersion() < 2) { RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher"; @@ -909,7 +909,7 @@ HRESULT SetClientProperties(IAudioClient2* client) { props.eCategory = AudioCategory_Communications; // Hardware-offloaded audio processing allows the main audio processing tasks // to be performed outside the computer's main CPU. Check support and log the - // result but hard-code |bIsOffload| to FALSE for now. + // result but hard-code `bIsOffload` to FALSE for now. // TODO(henrika): evaluate hardware-offloading. Might complicate usage of // IAudioClient::GetMixFormat(). BOOL supports_offload = FALSE; @@ -919,10 +919,10 @@ HRESULT SetClientProperties(IAudioClient2* client) { RTC_LOG(LS_ERROR) << "IAudioClient2::IsOffloadCapable failed: " << ErrorToString(error); } - RTC_DLOG(INFO) << "supports_offload: " << supports_offload; + RTC_DLOG(LS_INFO) << "supports_offload: " << supports_offload; props.bIsOffload = false; #if (NTDDI_VERSION < NTDDI_WINBLUE) - RTC_DLOG(INFO) << "options: Not supported in this build"; + RTC_DLOG(LS_INFO) << "options: Not supported in this build"; #else // TODO(henrika): pros and cons compared with AUDCLNT_STREAMOPTIONS_NONE? props.Options |= AUDCLNT_STREAMOPTIONS_NONE; @@ -939,7 +939,7 @@ HRESULT SetClientProperties(IAudioClient2* client) { // an appropriate interface to use for communications scenarios. // This interface is mainly meant for pro audio scenarios. 
// props.Options |= AUDCLNT_STREAMOPTIONS_MATCH_FORMAT; - RTC_DLOG(INFO) << "options: 0x" << rtc::ToHex(props.Options); + RTC_DLOG(LS_INFO) << "options: 0x" << rtc::ToHex(props.Options); #endif error = client->SetClientProperties(&props); if (FAILED(error.Error())) { @@ -953,7 +953,7 @@ HRESULT GetBufferSizeLimits(IAudioClient2* client, const WAVEFORMATEXTENSIBLE* format, REFERENCE_TIME* min_buffer_duration, REFERENCE_TIME* max_buffer_duration) { - RTC_DLOG(INFO) << "GetBufferSizeLimits"; + RTC_DLOG(LS_INFO) << "GetBufferSizeLimits"; RTC_DCHECK(client); if (GetAudioClientVersion() < 2) { RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher"; @@ -975,21 +975,21 @@ HRESULT GetBufferSizeLimits(IAudioClient2* client, } else { *min_buffer_duration = min_duration; *max_buffer_duration = max_duration; - RTC_DLOG(INFO) << "min_buffer_duration: " << min_buffer_duration; - RTC_DLOG(INFO) << "max_buffer_duration: " << max_buffer_duration; + RTC_DLOG(LS_INFO) << "min_buffer_duration: " << min_buffer_duration; + RTC_DLOG(LS_INFO) << "max_buffer_duration: " << max_buffer_duration; } return error.Error(); } HRESULT GetSharedModeMixFormat(IAudioClient* client, WAVEFORMATEXTENSIBLE* format) { - RTC_DLOG(INFO) << "GetSharedModeMixFormat"; + RTC_DLOG(LS_INFO) << "GetSharedModeMixFormat"; RTC_DCHECK(client); // The GetMixFormat method retrieves the stream format that the audio engine // uses for its internal processing of shared-mode streams. The method // allocates the storage for the structure and this memory will be released - // when |mix_format| goes out of scope. The GetMixFormat method retrieves a + // when `mix_format` goes out of scope. The GetMixFormat method retrieves a // format descriptor that is in the form of a WAVEFORMATEXTENSIBLE structure // instead of a standalone WAVEFORMATEX structure. The method outputs a // pointer to the WAVEFORMATEX structure that is embedded at the start of @@ -1017,10 +1017,10 @@ HRESULT GetSharedModeMixFormat(IAudioClient* client, return AUDCLNT_E_UNSUPPORTED_FORMAT; } - // Log a warning for the rare case where |mix_format| only contains a + // Log a warning for the rare case where `mix_format` only contains a // stand-alone WAVEFORMATEX structure but don't return. if (!wrapped_format.IsExtensible()) { - RTC_DLOG(WARNING) + RTC_DLOG(LS_WARNING) << "The returned format contains no extended information. " "The size is " << wrapped_format.size() << " bytes."; @@ -1030,7 +1030,7 @@ HRESULT GetSharedModeMixFormat(IAudioClient* client, // the returned structure is correctly extended or not. 
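The ScopedCoMem usage above exists because of the GetMixFormat() contract: the engine allocates the returned format with the COM task allocator, and the caller must free it. A minimal sketch:

#include <audioclient.h>
#include <combaseapi.h>

void QueryMixFormat(IAudioClient* client) {
  WAVEFORMATEX* mix_format = nullptr;
  if (SUCCEEDED(client->GetMixFormat(&mix_format))) {
    // WAVE_FORMAT_EXTENSIBLE tags the block as a full WAVEFORMATEXTENSIBLE.
    const bool extensible =
        (mix_format->wFormatTag == WAVE_FORMAT_EXTENSIBLE);
    (void)extensible;
    ::CoTaskMemFree(mix_format);  // what ScopedCoMem does automatically
  }
}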
RTC_CHECK_LE(wrapped_format.size(), sizeof(WAVEFORMATEXTENSIBLE)); memcpy(format, wrapped_format.get(), wrapped_format.size()); - RTC_DLOG(INFO) << WaveFormatToString(format); + RTC_DLOG(LS_INFO) << WaveFormatToString(format); return error.Error(); } @@ -1038,7 +1038,7 @@ HRESULT GetSharedModeMixFormat(IAudioClient* client, bool IsFormatSupported(IAudioClient* client, AUDCLNT_SHAREMODE share_mode, const WAVEFORMATEXTENSIBLE* format) { - RTC_DLOG(INFO) << "IsFormatSupported"; + RTC_DLOG(LS_INFO) << "IsFormatSupported"; RTC_DCHECK(client); ScopedCoMem closest_match; // This method provides a way for a client to determine, before calling @@ -1049,23 +1049,23 @@ bool IsFormatSupported(IAudioClient* client, _com_error error = client->IsFormatSupported( share_mode, reinterpret_cast(format), &closest_match); - RTC_LOG(INFO) << WaveFormatToString( + RTC_LOG(LS_INFO) << WaveFormatToString( const_cast(format)); if ((error.Error() == S_OK) && (closest_match == nullptr)) { - RTC_DLOG(INFO) + RTC_DLOG(LS_INFO) << "The audio endpoint device supports the specified stream format"; } else if ((error.Error() == S_FALSE) && (closest_match != nullptr)) { // Call succeeded with a closest match to the specified format. This log can // only be triggered for shared mode. RTC_LOG(LS_WARNING) << "Exact format is not supported, but a closest match exists"; - RTC_LOG(INFO) << WaveFormatToString(closest_match.Get()); + RTC_LOG(LS_INFO) << WaveFormatToString(closest_match.Get()); } else if ((error.Error() == AUDCLNT_E_UNSUPPORTED_FORMAT) && (closest_match == nullptr)) { // The audio engine does not support the caller-specified format or any // similar format. - RTC_DLOG(INFO) << "The audio endpoint device does not support the " - "specified stream format"; + RTC_DLOG(LS_INFO) << "The audio endpoint device does not support the " + "specified stream format"; } else { RTC_LOG(LS_ERROR) << "IAudioClient::IsFormatSupported failed: " << ErrorToString(error); @@ -1077,10 +1077,10 @@ bool IsFormatSupported(IAudioClient* client, HRESULT GetDevicePeriod(IAudioClient* client, AUDCLNT_SHAREMODE share_mode, REFERENCE_TIME* device_period) { - RTC_DLOG(INFO) << "GetDevicePeriod"; + RTC_DLOG(LS_INFO) << "GetDevicePeriod"; RTC_DCHECK(client); - // The |default_period| parameter specifies the default scheduling period - // for a shared-mode stream. The |minimum_period| parameter specifies the + // The `default_period` parameter specifies the default scheduling period + // for a shared-mode stream. The `minimum_period` parameter specifies the // minimum scheduling period for an exclusive-mode stream. // The time is expressed in 100-nanosecond units. REFERENCE_TIME default_period = 0; @@ -1094,10 +1094,10 @@ HRESULT GetDevicePeriod(IAudioClient* client, *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? 
default_period : minimum_period; - RTC_LOG(INFO) << "device_period: " - << ReferenceTimeToTimeDelta(*device_period).ms() << " [ms]"; - RTC_LOG(INFO) << "minimum_period: " - << ReferenceTimeToTimeDelta(minimum_period).ms() << " [ms]"; + RTC_LOG(LS_INFO) << "device_period: " + << ReferenceTimeToTimeDelta(*device_period).ms() << " [ms]"; + RTC_LOG(LS_INFO) << "minimum_period: " + << ReferenceTimeToTimeDelta(minimum_period).ms() << " [ms]"; return error.Error(); } @@ -1107,7 +1107,7 @@ HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3, uint32_t* fundamental_period_in_frames, uint32_t* min_period_in_frames, uint32_t* max_period_in_frames) { - RTC_DLOG(INFO) << "GetSharedModeEnginePeriod"; + RTC_DLOG(LS_INFO) << "GetSharedModeEnginePeriod"; RTC_DCHECK(client3); UINT32 default_period = 0; @@ -1125,15 +1125,17 @@ HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3, WAVEFORMATEX format_ex = format->Format; const WORD sample_rate = format_ex.nSamplesPerSec; - RTC_LOG(INFO) << "default_period_in_frames: " << default_period << " (" - << FramesToMilliseconds(default_period, sample_rate) << " ms)"; - RTC_LOG(INFO) << "fundamental_period_in_frames: " << fundamental_period - << " (" << FramesToMilliseconds(fundamental_period, sample_rate) - << " ms)"; - RTC_LOG(INFO) << "min_period_in_frames: " << min_period << " (" - << FramesToMilliseconds(min_period, sample_rate) << " ms)"; - RTC_LOG(INFO) << "max_period_in_frames: " << max_period << " (" - << FramesToMilliseconds(max_period, sample_rate) << " ms)"; + RTC_LOG(LS_INFO) << "default_period_in_frames: " << default_period << " (" + << FramesToMilliseconds(default_period, sample_rate) + << " ms)"; + RTC_LOG(LS_INFO) << "fundamental_period_in_frames: " << fundamental_period + << " (" + << FramesToMilliseconds(fundamental_period, sample_rate) + << " ms)"; + RTC_LOG(LS_INFO) << "min_period_in_frames: " << min_period << " (" + << FramesToMilliseconds(min_period, sample_rate) << " ms)"; + RTC_LOG(LS_INFO) << "max_period_in_frames: " << max_period << " (" + << FramesToMilliseconds(max_period, sample_rate) << " ms)"; *default_period_in_frames = default_period; *fundamental_period_in_frames = fundamental_period; *min_period_in_frames = min_period; @@ -1143,7 +1145,7 @@ HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3, HRESULT GetPreferredAudioParameters(IAudioClient* client, AudioParameters* params) { - RTC_DLOG(INFO) << "GetPreferredAudioParameters"; + RTC_DLOG(LS_INFO) << "GetPreferredAudioParameters"; RTC_DCHECK(client); return GetPreferredAudioParametersInternal(client, params, -1); } @@ -1151,7 +1153,7 @@ HRESULT GetPreferredAudioParameters(IAudioClient* client, HRESULT GetPreferredAudioParameters(IAudioClient* client, webrtc::AudioParameters* params, uint32_t sample_rate) { - RTC_DLOG(INFO) << "GetPreferredAudioParameters: " << sample_rate; + RTC_DLOG(LS_INFO) << "GetPreferredAudioParameters: " << sample_rate; RTC_DCHECK(client); return GetPreferredAudioParametersInternal(client, params, sample_rate); } @@ -1162,8 +1164,9 @@ HRESULT SharedModeInitialize(IAudioClient* client, REFERENCE_TIME buffer_duration, bool auto_convert_pcm, uint32_t* endpoint_buffer_size) { - RTC_DLOG(INFO) << "SharedModeInitialize: buffer_duration=" << buffer_duration - << ", auto_convert_pcm=" << auto_convert_pcm; + RTC_DLOG(LS_INFO) << "SharedModeInitialize: buffer_duration=" + << buffer_duration + << ", auto_convert_pcm=" << auto_convert_pcm; RTC_DCHECK(client); RTC_DCHECK_GE(buffer_duration, 0); if (buffer_duration != 0) { @@ -1188,7 +1191,7 @@ HRESULT 
SharedModeInitialize(IAudioClient* client, (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE); if (use_event) { stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK; - RTC_DLOG(INFO) << "The stream is initialized to be event driven"; + RTC_DLOG(LS_INFO) << "The stream is initialized to be event driven"; } // Check if sample-rate conversion is requested. @@ -1197,14 +1200,14 @@ HRESULT SharedModeInitialize(IAudioClient* client, // from our (the client's) format to the audio engine mix format. // Currently only supported for testing, i.e., not possible to enable using // public APIs. - RTC_DLOG(INFO) << "The stream is initialized to support rate conversion"; + RTC_DLOG(LS_INFO) << "The stream is initialized to support rate conversion"; stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM; stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY; } - RTC_DLOG(INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags); + RTC_DLOG(LS_INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags); - // Initialize the shared mode client for minimal delay if |buffer_duration| - // is 0 or possibly a higher delay (more robust) if |buffer_duration| is + // Initialize the shared mode client for minimal delay if `buffer_duration` + // is 0 or possibly a higher delay (more robust) if `buffer_duration` is // larger than 0. The actual size is given by IAudioClient::GetBufferSize(). _com_error error = client->Initialize( AUDCLNT_SHAREMODE_SHARED, stream_flags, buffer_duration, 0, @@ -1243,23 +1246,23 @@ HRESULT SharedModeInitialize(IAudioClient* client, } *endpoint_buffer_size = buffer_size_in_frames; - RTC_DLOG(INFO) << "endpoint buffer size: " << buffer_size_in_frames - << " [audio frames]"; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " << buffer_size_in_frames + << " [audio frames]"; const double size_in_ms = static_cast(buffer_size_in_frames) / (format->Format.nSamplesPerSec / 1000.0); - RTC_DLOG(INFO) << "endpoint buffer size: " - << static_cast(size_in_ms + 0.5) << " [ms]"; - RTC_DLOG(INFO) << "bytes per audio frame: " << format->Format.nBlockAlign; - RTC_DLOG(INFO) << "endpoint buffer size: " - << buffer_size_in_frames * format->Format.nChannels * - (format->Format.wBitsPerSample / 8) - << " [bytes]"; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " + << static_cast(size_in_ms + 0.5) << " [ms]"; + RTC_DLOG(LS_INFO) << "bytes per audio frame: " << format->Format.nBlockAlign; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " + << buffer_size_in_frames * format->Format.nChannels * + (format->Format.wBitsPerSample / 8) + << " [bytes]"; // TODO(henrika): utilize when delay measurements are added. 
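Stripped of logging and checks, the event-driven shared-mode setup in SharedModeInitialize() above reduces to three WASAPI calls. A sketch, with the flags limited to the event callback shown in the hunk and `format` and `audio_samples_event` assumed to exist:

#include <audioclient.h>

HRESULT InitializeShared(IAudioClient* client,
                         const WAVEFORMATEXTENSIBLE& format,
                         HANDLE audio_samples_event) {
  const DWORD stream_flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
  HRESULT hr = client->Initialize(
      AUDCLNT_SHAREMODE_SHARED, stream_flags,
      /*hnsBufferDuration=*/0, /*hnsPeriodicity=*/0,
      reinterpret_cast<const WAVEFORMATEX*>(&format), nullptr);
  if (FAILED(hr))
    return hr;
  hr = client->SetEventHandle(audio_samples_event);  // must precede Start()
  if (FAILED(hr))
    return hr;
  UINT32 endpoint_buffer_frames = 0;  // actual (minimum) buffer size granted
  return client->GetBufferSize(&endpoint_buffer_frames);
}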
REFERENCE_TIME latency = 0; error = client->GetStreamLatency(&latency); - RTC_DLOG(INFO) << "stream latency: " << ReferenceTimeToTimeDelta(latency).ms() - << " [ms]"; + RTC_DLOG(LS_INFO) << "stream latency: " + << ReferenceTimeToTimeDelta(latency).ms() << " [ms]"; return error.Error(); } @@ -1269,9 +1272,9 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client, uint32_t period_in_frames, bool auto_convert_pcm, uint32_t* endpoint_buffer_size) { - RTC_DLOG(INFO) << "SharedModeInitializeLowLatency: period_in_frames=" - << period_in_frames - << ", auto_convert_pcm=" << auto_convert_pcm; + RTC_DLOG(LS_INFO) << "SharedModeInitializeLowLatency: period_in_frames=" + << period_in_frames + << ", auto_convert_pcm=" << auto_convert_pcm; RTC_DCHECK(client); RTC_DCHECK_GT(period_in_frames, 0); if (auto_convert_pcm) { @@ -1284,17 +1287,17 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client, (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE); if (use_event) { stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK; - RTC_DLOG(INFO) << "The stream is initialized to be event driven"; + RTC_DLOG(LS_INFO) << "The stream is initialized to be event driven"; } if (auto_convert_pcm) { stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM; stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY; } - RTC_DLOG(INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags); + RTC_DLOG(LS_INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags); // Initialize the shared mode client for lowest possible latency. // It is assumed that GetSharedModeEnginePeriod() has been used to query the - // smallest possible engine period and that it is given by |period_in_frames|. + // smallest possible engine period and that it is given by `period_in_frames`. _com_error error = client->InitializeSharedAudioStream( stream_flags, period_in_frames, reinterpret_cast(format), nullptr); @@ -1324,17 +1327,17 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client, } *endpoint_buffer_size = buffer_size_in_frames; - RTC_DLOG(INFO) << "endpoint buffer size: " << buffer_size_in_frames - << " [audio frames]"; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " << buffer_size_in_frames + << " [audio frames]"; const double size_in_ms = static_cast(buffer_size_in_frames) / (format->Format.nSamplesPerSec / 1000.0); - RTC_DLOG(INFO) << "endpoint buffer size: " - << static_cast(size_in_ms + 0.5) << " [ms]"; - RTC_DLOG(INFO) << "bytes per audio frame: " << format->Format.nBlockAlign; - RTC_DLOG(INFO) << "endpoint buffer size: " - << buffer_size_in_frames * format->Format.nChannels * - (format->Format.wBitsPerSample / 8) - << " [bytes]"; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " + << static_cast(size_in_ms + 0.5) << " [ms]"; + RTC_DLOG(LS_INFO) << "bytes per audio frame: " << format->Format.nBlockAlign; + RTC_DLOG(LS_INFO) << "endpoint buffer size: " + << buffer_size_in_frames * format->Format.nChannels * + (format->Format.wBitsPerSample / 8) + << " [bytes]"; // TODO(henrika): utilize when delay measurements are added. 
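SharedModeInitializeLowLatency() above is the IAudioClient3 counterpart: query the engine periods supported for the format, then initialize with the minimum one. A condensed sketch (error handling trimmed):

#include <audioclient.h>

HRESULT InitializeLowLatency(IAudioClient3* client3,
                             const WAVEFORMATEXTENSIBLE& format) {
  UINT32 default_period = 0, fundamental_period = 0;
  UINT32 min_period = 0, max_period = 0;  // all in frames
  HRESULT hr = client3->GetSharedModeEnginePeriod(
      reinterpret_cast<const WAVEFORMATEX*>(&format), &default_period,
      &fundamental_period, &min_period, &max_period);
  if (FAILED(hr))
    return hr;
  // Smallest possible engine period, as the comment above assumes.
  return client3->InitializeSharedAudioStream(
      AUDCLNT_STREAMFLAGS_EVENTCALLBACK, min_period,
      reinterpret_cast<const WAVEFORMATEX*>(&format), nullptr);
}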
REFERENCE_TIME latency = 0;
@@ -1343,14 +1346,14 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
     RTC_LOG(LS_WARNING) << "IAudioClient::GetStreamLatency failed: "
                         << ErrorToString(error);
   } else {
-    RTC_DLOG(INFO) << "stream latency: "
-                   << ReferenceTimeToTimeDelta(latency).ms() << " [ms]";
+    RTC_DLOG(LS_INFO) << "stream latency: "
+                      << ReferenceTimeToTimeDelta(latency).ms() << " [ms]";
   }
   return error.Error();
 }
 
 ComPtr<IAudioRenderClient> CreateRenderClient(IAudioClient* client) {
-  RTC_DLOG(INFO) << "CreateRenderClient";
+  RTC_DLOG(LS_INFO) << "CreateRenderClient";
   RTC_DCHECK(client);
   // Get access to the IAudioRenderClient interface. This interface
   // enables us to write output data to a rendering endpoint buffer.
@@ -1366,7 +1369,7 @@ ComPtr<IAudioRenderClient> CreateRenderClient(IAudioClient* client) {
 }
 
 ComPtr<IAudioCaptureClient> CreateCaptureClient(IAudioClient* client) {
-  RTC_DLOG(INFO) << "CreateCaptureClient";
+  RTC_DLOG(LS_INFO) << "CreateCaptureClient";
   RTC_DCHECK(client);
   // Get access to the IAudioCaptureClient interface. This interface
   // enables us to read input data from a capturing endpoint buffer.
@@ -1382,7 +1385,7 @@ ComPtr<IAudioCaptureClient> CreateCaptureClient(IAudioClient* client) {
 }
 
 ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client) {
-  RTC_DLOG(INFO) << "CreateAudioClock";
+  RTC_DLOG(LS_INFO) << "CreateAudioClock";
   RTC_DCHECK(client);
   // Get access to the IAudioClock interface. This interface enables us to
   // monitor a stream's data rate and the current position in the stream.
@@ -1397,7 +1400,7 @@ ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client) {
 }
 
 ComPtr<IAudioSessionControl> CreateAudioSessionControl(IAudioClient* client) {
-  RTC_DLOG(INFO) << "CreateAudioSessionControl";
+  RTC_DLOG(LS_INFO) << "CreateAudioSessionControl";
   RTC_DCHECK(client);
   ComPtr<IAudioSessionControl> audio_session_control;
   _com_error error = client->GetService(IID_PPV_ARGS(&audio_session_control));
@@ -1410,7 +1413,7 @@ ComPtr<IAudioSessionControl> CreateAudioSessionControl(IAudioClient* client) {
 }
 
 ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(IAudioClient* client) {
-  RTC_DLOG(INFO) << "CreateSimpleAudioVolume";
+  RTC_DLOG(LS_INFO) << "CreateSimpleAudioVolume";
   RTC_DCHECK(client);
   // Get access to the ISimpleAudioVolume interface. This interface enables a
   // client to control the master volume level of an audio session.
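All five Create*() helpers above share one WASAPI pattern: ask an initialized IAudioClient for a service interface through GetService() with IID_PPV_ARGS. A condensed sketch of that shared shape (Windows SDK headers assumed; logging elided; GetAudioService is a hypothetical name):

// Sketch only: the common IAudioClient::GetService() pattern behind the
// helpers above; each real helper differs only in the requested interface
// and the message it logs.
#include <audioclient.h>
#include <comdef.h>
#include <wrl/client.h>

template <typename ServiceInterface>
Microsoft::WRL::ComPtr<ServiceInterface> GetAudioService(IAudioClient* client) {
  Microsoft::WRL::ComPtr<ServiceInterface> service;
  _com_error error = client->GetService(IID_PPV_ARGS(&service));
  if (FAILED(error.Error())) {
    return Microsoft::WRL::ComPtr<ServiceInterface>();  // empty on failure
  }
  return service;
}

// Usage: auto render = GetAudioService<IAudioRenderClient>(client.Get());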
@@ -1427,7 +1430,7 @@ ComPtr CreateSimpleAudioVolume(IAudioClient* client) { bool FillRenderEndpointBufferWithSilence(IAudioClient* client, IAudioRenderClient* render_client) { - RTC_DLOG(INFO) << "FillRenderEndpointBufferWithSilence"; + RTC_DLOG(LS_INFO) << "FillRenderEndpointBufferWithSilence"; RTC_DCHECK(client); RTC_DCHECK(render_client); UINT32 endpoint_buffer_size = 0; @@ -1447,11 +1450,11 @@ bool FillRenderEndpointBufferWithSilence(IAudioClient* client, << ErrorToString(error); return false; } - RTC_DLOG(INFO) << "num_queued_frames: " << num_queued_frames; + RTC_DLOG(LS_INFO) << "num_queued_frames: " << num_queued_frames; BYTE* data = nullptr; int num_frames_to_fill = endpoint_buffer_size - num_queued_frames; - RTC_DLOG(INFO) << "num_frames_to_fill: " << num_frames_to_fill; + RTC_DLOG(LS_INFO) << "num_frames_to_fill: " << num_frames_to_fill; error = render_client->GetBuffer(num_frames_to_fill, &data); if (FAILED(error.Error())) { RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: " diff --git a/modules/audio_device/win/core_audio_utility_win.h b/modules/audio_device/win/core_audio_utility_win.h index 79203dc954..754b4ddafa 100644 --- a/modules/audio_device/win/core_audio_utility_win.h +++ b/modules/audio_device/win/core_audio_utility_win.h @@ -34,7 +34,7 @@ namespace webrtc { namespace webrtc_win { // Utility class which registers a thread with MMCSS in the constructor and -// deregisters MMCSS in the destructor. The task name is given by |task_name|. +// deregisters MMCSS in the destructor. The task name is given by `task_name`. // The Multimedia Class Scheduler service (MMCSS) enables multimedia // applications to ensure that their time-sensitive processing receives // prioritized access to CPU resources without denying CPU resources to @@ -83,8 +83,8 @@ class ScopedMMCSSRegistration { } explicit ScopedMMCSSRegistration(const wchar_t* task_name) { - RTC_DLOG(INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name); - // Register the calling thread with MMCSS for the supplied |task_name|. + RTC_DLOG(LS_INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name); + // Register the calling thread with MMCSS for the supplied `task_name`. DWORD mmcss_task_index = 0; mmcss_handle_ = AvSetMmThreadCharacteristicsW(task_name, &mmcss_task_index); if (mmcss_handle_ == nullptr) { @@ -93,18 +93,18 @@ class ScopedMMCSSRegistration { } else { const DWORD priority_class = GetPriorityClass(GetCurrentProcess()); const int priority = GetThreadPriority(GetCurrentThread()); - RTC_DLOG(INFO) << "priority class: " - << PriorityClassToString(priority_class) << "(" - << priority_class << ")"; - RTC_DLOG(INFO) << "priority: " << PriorityToString(priority) << "(" - << priority << ")"; + RTC_DLOG(LS_INFO) << "priority class: " + << PriorityClassToString(priority_class) << "(" + << priority_class << ")"; + RTC_DLOG(LS_INFO) << "priority: " << PriorityToString(priority) << "(" + << priority << ")"; } } ~ScopedMMCSSRegistration() { if (Succeeded()) { // Deregister with MMCSS. - RTC_DLOG(INFO) << "~ScopedMMCSSRegistration"; + RTC_DLOG(LS_INFO) << "~ScopedMMCSSRegistration"; AvRevertMmThreadCharacteristics(mmcss_handle_); } } @@ -244,7 +244,7 @@ class ScopedHandle { void Close() { if (handle_) { if (!::CloseHandle(handle_)) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } handle_ = nullptr; } @@ -304,7 +304,7 @@ bool IsMMCSSSupported(); // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API. // Number of active audio devices in the specified data flow direction. 
-// Set |data_flow| to eAll to retrieve the total number of active audio +// Set `data_flow` to eAll to retrieve the total number of active audio // devices. int NumberOfActiveDevices(EDataFlow data_flow); @@ -327,7 +327,7 @@ std::string GetCommunicationsInputDeviceID(); std::string GetCommunicationsOutputDeviceID(); // Creates an IMMDevice interface corresponding to the unique device id in -// |device_id|, or by data-flow direction and role if |device_id| is set to +// `device_id`, or by data-flow direction and role if `device_id` is set to // AudioDeviceName::kDefaultDeviceId. Microsoft::WRL::ComPtr CreateDevice(const std::string& device_id, EDataFlow data_flow, @@ -339,8 +339,8 @@ Microsoft::WRL::ComPtr CreateDevice(const std::string& device_id, webrtc::AudioDeviceName GetDeviceName(IMMDevice* device); // Gets the user-friendly name of the endpoint device which is represented -// by a unique id in |device_id|, or by data-flow direction and role if -// |device_id| is set to AudioDeviceName::kDefaultDeviceId. +// by a unique id in `device_id`, or by data-flow direction and role if +// `device_id` is set to AudioDeviceName::kDefaultDeviceId. std::string GetFriendlyName(const std::string& device_id, EDataFlow data_flow, ERole role); @@ -349,11 +349,11 @@ std::string GetFriendlyName(const std::string& device_id, EDataFlow GetDataFlow(IMMDevice* device); // Enumerates all input devices and adds the names (friendly name and unique -// device id) to the list in |device_names|. +// device id) to the list in `device_names`. bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names); // Enumerates all output devices and adds the names (friendly name and unique -// device id) to the list in |device_names|. +// device id) to the list in `device_names`. bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names); // The Windows Audio Session API (WASAPI) enables client applications to @@ -361,18 +361,18 @@ bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names); // device. Header files Audioclient.h and Audiopolicy.h define the WASAPI // interfaces. -// Creates an IAudioSessionManager2 interface for the specified |device|. +// Creates an IAudioSessionManager2 interface for the specified `device`. // This interface provides access to e.g. the IAudioSessionEnumerator Microsoft::WRL::ComPtr CreateSessionManager2( IMMDevice* device); -// Creates an IAudioSessionEnumerator interface for the specified |device|. +// Creates an IAudioSessionEnumerator interface for the specified `device`. // The client can use the interface to enumerate audio sessions on the audio // device Microsoft::WRL::ComPtr CreateSessionEnumerator( IMMDevice* device); -// Number of active audio sessions for the given |device|. Expired or inactive +// Number of active audio sessions for the given `device`. Expired or inactive // sessions are not included. int NumberOfActiveSessions(IMMDevice* device); @@ -387,15 +387,15 @@ Microsoft::WRL::ComPtr CreateClient3(const std::string& device_id, EDataFlow data_flow, ERole role); // Sets the AudioCategory_Communications category. Should be called before -// GetSharedModeMixFormat() and IsFormatSupported(). The |client| argument must +// GetSharedModeMixFormat() and IsFormatSupported(). The `client` argument must // be an IAudioClient2 or IAudioClient3 interface pointer, hence only supported // on Windows 8 and above. // TODO(henrika): evaluate effect (if any). 
HRESULT SetClientProperties(IAudioClient2* client); // Returns the buffer size limits of the hardware audio engine in -// 100-nanosecond units given a specified |format|. Does not require prior -// audio stream initialization. The |client| argument must be an IAudioClient2 +// 100-nanosecond units given a specified `format`. Does not require prior +// audio stream initialization. The `client` argument must be an IAudioClient2 // or IAudioClient3 interface pointer, hence only supported on Windows 8 and // above. // TODO(henrika): always fails with AUDCLNT_E_OFFLOAD_MODE_ONLY. @@ -412,29 +412,29 @@ HRESULT GetBufferSizeLimits(IAudioClient2* client, HRESULT GetSharedModeMixFormat(IAudioClient* client, WAVEFORMATEXTENSIBLE* format); -// Returns true if the specified |client| supports the format in |format| -// for the given |share_mode| (shared or exclusive). The client can call this +// Returns true if the specified `client` supports the format in `format` +// for the given `share_mode` (shared or exclusive). The client can call this // method before calling IAudioClient::Initialize. bool IsFormatSupported(IAudioClient* client, AUDCLNT_SHAREMODE share_mode, const WAVEFORMATEXTENSIBLE* format); // For a shared-mode stream, the audio engine periodically processes the -// data in the endpoint buffer at the period obtained in |device_period|. -// For an exclusive mode stream, |device_period| corresponds to the minimum +// data in the endpoint buffer at the period obtained in `device_period`. +// For an exclusive mode stream, `device_period` corresponds to the minimum // time interval between successive processing by the endpoint device. // This period plus the stream latency between the buffer and endpoint device // represents the minimum possible latency that an audio application can -// achieve. The time in |device_period| is expressed in 100-nanosecond units. +// achieve. The time in `device_period` is expressed in 100-nanosecond units. HRESULT GetDevicePeriod(IAudioClient* client, AUDCLNT_SHAREMODE share_mode, REFERENCE_TIME* device_period); // Returns the range of periodicities supported by the engine for the specified -// stream |format|. The periodicity of the engine is the rate at which the +// stream `format`. The periodicity of the engine is the rate at which the // engine wakes an event-driven audio client to transfer audio data to or from // the engine. Can be used for low-latency support on some devices. -// The |client| argument must be an IAudioClient3 interface pointer, hence only +// The `client` argument must be an IAudioClient3 interface pointer, hence only // supported on Windows 10 and above. HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3, const WAVEFORMATEXTENSIBLE* format, @@ -443,14 +443,14 @@ HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3, uint32_t* min_period_in_frames, uint32_t* max_period_in_frames); -// Get the preferred audio parameters for the given |client| corresponding to +// Get the preferred audio parameters for the given `client` corresponding to // the stream format that the audio engine uses for its internal processing of // shared-mode streams. The acquired values should only be utilized for shared // mode streamed since there are no preferred settings for an exclusive mode // stream. HRESULT GetPreferredAudioParameters(IAudioClient* client, webrtc::AudioParameters* params); -// As above but override the preferred sample rate and use |sample_rate| +// As above but override the preferred sample rate and use `sample_rate` // instead. 
Intended mainly for testing purposes and in combination with rate // conversion. HRESULT GetPreferredAudioParameters(IAudioClient* client, @@ -461,20 +461,20 @@ HRESULT GetPreferredAudioParameters(IAudioClient* client, // the client must initialize it once, and only once, to initialize the audio // stream between the client and the device. In shared mode, the client // connects indirectly through the audio engine which does the mixing. -// If a valid event is provided in |event_handle|, the client will be -// initialized for event-driven buffer handling. If |event_handle| is set to +// If a valid event is provided in `event_handle`, the client will be +// initialized for event-driven buffer handling. If `event_handle` is set to // nullptr, event-driven buffer handling is not utilized. To achieve the // minimum stream latency between the client application and audio endpoint -// device, set |buffer_duration| to 0. A client has the option of requesting a +// device, set `buffer_duration` to 0. A client has the option of requesting a // buffer size that is larger than what is strictly necessary to make timing // glitches rare or nonexistent. Increasing the buffer size does not necessarily // increase the stream latency. Each unit of reference time is 100 nanoseconds. -// The |auto_convert_pcm| parameter can be used for testing purposes to ensure +// The `auto_convert_pcm` parameter can be used for testing purposes to ensure // that the sample rate of the client side does not have to match the audio -// engine mix format. If |auto_convert_pcm| is set to true, a rate converter -// will be inserted to convert between the sample rate in |format| and the +// engine mix format. If `auto_convert_pcm` is set to true, a rate converter +// will be inserted to convert between the sample rate in `format` and the // preferred rate given by GetPreferredAudioParameters(). -// The output parameter |endpoint_buffer_size| contains the size of the +// The output parameter `endpoint_buffer_size` contains the size of the // endpoint buffer and it is expressed as the number of audio frames the // buffer can hold. HRESULT SharedModeInitialize(IAudioClient* client, @@ -486,7 +486,7 @@ HRESULT SharedModeInitialize(IAudioClient* client, // Works as SharedModeInitialize() but adds support for using smaller engine // periods than the default period. -// The |client| argument must be an IAudioClient3 interface pointer, hence only +// The `client` argument must be an IAudioClient3 interface pointer, hence only // supported on Windows 10 and above. // TODO(henrika): can probably be merged into SharedModeInitialize() to avoid // duplicating code. Keeping as separate method for now until decided if we @@ -499,52 +499,52 @@ HRESULT SharedModeInitializeLowLatency(IAudioClient3* client, uint32_t* endpoint_buffer_size); // Creates an IAudioRenderClient client for an existing IAudioClient given by -// |client|. The IAudioRenderClient interface enables a client to write +// `client`. The IAudioRenderClient interface enables a client to write // output data to a rendering endpoint buffer. The methods in this interface // manage the movement of data packets that contain audio-rendering data. Microsoft::WRL::ComPtr CreateRenderClient( IAudioClient* client); // Creates an IAudioCaptureClient client for an existing IAudioClient given by -// |client|. The IAudioCaptureClient interface enables a client to read +// `client`. The IAudioCaptureClient interface enables a client to read // input data from a capture endpoint buffer. 
The methods in this interface // manage the movement of data packets that contain capture data. Microsoft::WRL::ComPtr CreateCaptureClient( IAudioClient* client); // Creates an IAudioClock interface for an existing IAudioClient given by -// |client|. The IAudioClock interface enables a client to monitor a stream's +// `client`. The IAudioClock interface enables a client to monitor a stream's // data rate and the current position in the stream. Microsoft::WRL::ComPtr CreateAudioClock(IAudioClient* client); // Creates an AudioSessionControl interface for an existing IAudioClient given -// by |client|. The IAudioControl interface enables a client to configure the +// by `client`. The IAudioControl interface enables a client to configure the // control parameters for an audio session and to monitor events in the session. Microsoft::WRL::ComPtr CreateAudioSessionControl( IAudioClient* client); // Creates an ISimpleAudioVolume interface for an existing IAudioClient given by -// |client|. This interface enables a client to control the master volume level +// `client`. This interface enables a client to control the master volume level // of an active audio session. Microsoft::WRL::ComPtr CreateSimpleAudioVolume( IAudioClient* client); // Fills up the endpoint rendering buffer with silence for an existing -// IAudioClient given by |client| and a corresponding IAudioRenderClient -// given by |render_client|. +// IAudioClient given by `client` and a corresponding IAudioRenderClient +// given by `render_client`. bool FillRenderEndpointBufferWithSilence(IAudioClient* client, IAudioRenderClient* render_client); -// Prints/logs all fields of the format structure in |format|. +// Prints/logs all fields of the format structure in `format`. // Also supports extended versions (WAVEFORMATEXTENSIBLE). -std::string WaveFormatToString(const WaveFormatWrapper format); +std::string WaveFormatToString(WaveFormatWrapper format); // Converts Windows internal REFERENCE_TIME (100 nanosecond units) into // generic webrtc::TimeDelta which then can be converted to any time unit. webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time); -// Converts size expressed in number of audio frames, |num_frames|, into -// milliseconds given a specified |sample_rate|. +// Converts size expressed in number of audio frames, `num_frames`, into +// milliseconds given a specified `sample_rate`. double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate); // Converts a COM error into a human-readable string. diff --git a/modules/audio_device/win/core_audio_utility_win_unittest.cc b/modules/audio_device/win/core_audio_utility_win_unittest.cc index 9f1ce5e75e..277f54eb35 100644 --- a/modules/audio_device/win/core_audio_utility_win_unittest.cc +++ b/modules/audio_device/win/core_audio_utility_win_unittest.cc @@ -107,7 +107,7 @@ TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapper) { TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapperExtended) { // Use default constructor for WAVEFORMATEXTENSIBLE and verify that it - // results in same size as for WAVEFORMATEX even if the size of |format_ex| + // results in same size as for WAVEFORMATEX even if the size of `format_ex` // equals the size of WAVEFORMATEXTENSIBLE. 
WAVEFORMATEXTENSIBLE format_ex = {}; core_audio_utility::WaveFormatWrapper wave_format_ex(&format_ex); @@ -319,7 +319,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateSessionManager2) { EDataFlow data_flow[] = {eRender, eCapture}; // Obtain reference to an IAudioSessionManager2 interface for a default audio - // endpoint device specified by two different data flows and the |eConsole| + // endpoint device specified by two different data flows and the `eConsole` // role. for (size_t i = 0; i < arraysize(data_flow); ++i) { ComPtr device(core_audio_utility::CreateDevice( @@ -339,7 +339,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateSessionEnumerator) { // Obtain reference to an IAudioSessionEnumerator interface for a default // audio endpoint device specified by two different data flows and the - // |eConsole| role. + // `eConsole` role. for (size_t i = 0; i < arraysize(data_flow); ++i) { ComPtr device(core_audio_utility::CreateDevice( AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole)); @@ -364,7 +364,7 @@ TEST_F(CoreAudioUtilityWinTest, NumberOfActiveSessions) { EDataFlow data_flow[] = {eRender, eCapture}; // Count number of active audio session for a default audio endpoint device - // specified by two different data flows and the |eConsole| role. + // specified by two different data flows and the `eConsole` role. // Ensure that the number of active audio sessions is less than or equal to // the total number of audio sessions on that same device. for (size_t i = 0; i < arraysize(data_flow); ++i) { @@ -394,7 +394,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateClient) { EDataFlow data_flow[] = {eRender, eCapture}; // Obtain reference to an IAudioClient interface for a default audio endpoint - // device specified by two different data flows and the |eConsole| role. + // device specified by two different data flows and the `eConsole` role. for (size_t i = 0; i < arraysize(data_flow); ++i) { ComPtr client = core_audio_utility::CreateClient( AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole); @@ -409,7 +409,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateClient2) { EDataFlow data_flow[] = {eRender, eCapture}; // Obtain reference to an IAudioClient2 interface for a default audio endpoint - // device specified by two different data flows and the |eConsole| role. + // device specified by two different data flows and the `eConsole` role. for (size_t i = 0; i < arraysize(data_flow); ++i) { ComPtr client2 = core_audio_utility::CreateClient2( AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole); @@ -424,7 +424,7 @@ TEST_F(CoreAudioUtilityWinTest, CreateClient3) { EDataFlow data_flow[] = {eRender, eCapture}; // Obtain reference to an IAudioClient3 interface for a default audio endpoint - // device specified by two different data flows and the |eConsole| role. + // device specified by two different data flows and the `eConsole` role. 
for (size_t i = 0; i < arraysize(data_flow); ++i) { ComPtr client3 = core_audio_utility::CreateClient3( AudioDeviceName::kDefaultDeviceId, data_flow[i], eConsole); diff --git a/modules/audio_mixer/audio_mixer_impl.h b/modules/audio_mixer/audio_mixer_impl.h index 737fcbdc43..76b1131777 100644 --- a/modules/audio_mixer/audio_mixer_impl.h +++ b/modules/audio_mixer/audio_mixer_impl.h @@ -22,7 +22,6 @@ #include "api/scoped_refptr.h" #include "modules/audio_mixer/frame_combiner.h" #include "modules/audio_mixer/output_rate_calculator.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/race_checker.h" #include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" @@ -48,6 +47,9 @@ class AudioMixerImpl : public AudioMixer { ~AudioMixerImpl() override; + AudioMixerImpl(const AudioMixerImpl&) = delete; + AudioMixerImpl& operator=(const AudioMixerImpl&) = delete; + // AudioMixer functions bool AddSource(Source* audio_source) override; void RemoveSource(Source* audio_source) override; @@ -92,8 +94,6 @@ class AudioMixerImpl : public AudioMixer { // Component that handles actual adding of audio frames. FrameCombiner frame_combiner_; - - RTC_DISALLOW_COPY_AND_ASSIGN(AudioMixerImpl); }; } // namespace webrtc diff --git a/modules/audio_mixer/audio_mixer_impl_unittest.cc b/modules/audio_mixer/audio_mixer_impl_unittest.cc index 61aa74e0a1..4087748006 100644 --- a/modules/audio_mixer/audio_mixer_impl_unittest.cc +++ b/modules/audio_mixer/audio_mixer_impl_unittest.cc @@ -126,8 +126,8 @@ class CustomRateCalculator : public OutputRateCalculator { const int rate_; }; -// Creates participants from |frames| and |frame_info| and adds them -// to the mixer. Compares mixed status with |expected_status| +// Creates participants from `frames` and `frame_info` and adds them +// to the mixer. Compares mixed status with `expected_status` void MixAndCompare( const std::vector& frames, const std::vector& frame_info, @@ -343,7 +343,7 @@ TEST(AudioMixer, RampedOutSourcesShouldNotBeMarkedMixed) { for (int i = 0; i < kAudioSources; ++i) { ResetFrame(participants[i].fake_frame()); // Set the participant audio energy to increase with the index - // |i|. + // `i`. participants[i].fake_frame()->mutable_data()[0] = 100 * i; } diff --git a/modules/audio_mixer/default_output_rate_calculator.h b/modules/audio_mixer/default_output_rate_calculator.h index a7aaf681a9..02a3b5c37b 100644 --- a/modules/audio_mixer/default_output_rate_calculator.h +++ b/modules/audio_mixer/default_output_rate_calculator.h @@ -24,8 +24,8 @@ class DefaultOutputRateCalculator : public OutputRateCalculator { // Produces the least native rate greater or equal to the preferred // sample rates. A native rate is one in - // AudioProcessing::NativeRate. If |preferred_sample_rates| is - // empty, returns |kDefaultFrequency|. + // AudioProcessing::NativeRate. If `preferred_sample_rates` is + // empty, returns `kDefaultFrequency`. int CalculateOutputRateFromRange( rtc::ArrayView preferred_sample_rates) override; ~DefaultOutputRateCalculator() override {} diff --git a/modules/audio_mixer/g3doc/index.md b/modules/audio_mixer/g3doc/index.md index 285530e95a..4ced289bf8 100644 --- a/modules/audio_mixer/g3doc/index.md +++ b/modules/audio_mixer/g3doc/index.md @@ -7,7 +7,7 @@ The WebRTC audio mixer module is responsible for mixing multiple incoming audio streams (sources) into a single audio stream (mix). It works with 10 ms frames, it supports sample rates up to 48 kHz and up to 8 audio channels. 
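A rough usage sketch of the mixer API this page documents (names taken from api/audio/audio_mixer.h and modules/audio_mixer/audio_mixer_impl.h; illustrative rather than a complete program):

// Sketch only: mix two registered sources into one 10 ms output frame.
#include "api/audio/audio_frame.h"
#include "modules/audio_mixer/audio_mixer_impl.h"

void MixOnce(webrtc::AudioMixer::Source* source_a,
             webrtc::AudioMixer::Source* source_b) {
  rtc::scoped_refptr<webrtc::AudioMixerImpl> mixer =
      webrtc::AudioMixerImpl::Create();
  mixer->AddSource(source_a);
  mixer->AddSource(source_b);
  webrtc::AudioFrame mixed;
  // Output rate is derived from the sources; channel count is the caller's.
  mixer->Mix(/*number_of_channels=*/2, &mixed);
  mixer->RemoveSource(source_a);
  mixer->RemoveSource(source_b);
}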
The API is defined in -[`api/audio/audio_mixer.h`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/audio/audio_mixer.h) +[`api/audio/audio_mixer.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/audio/audio_mixer.h) and it includes the definition of [`AudioMixer::Source`](https://source.chromium.org/search?q=symbol:AudioMixer::Source%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h), which describes an incoming audio stream, and the definition of @@ -39,9 +39,9 @@ method allows to generates a mix with the desired number of channels. The interface is implemented in different parts of WebRTC: * [`AudioMixer::Source`](https://source.chromium.org/search?q=symbol:AudioMixer::Source%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h): - [`audio/audio_receive_stream.h`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/audio/audio_receive_stream.h) + [`audio/audio_receive_stream.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/audio/audio_receive_stream.h) * [`AudioMixer`](https://source.chromium.org/search?q=symbol:AudioMixer%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h): - [`modules/audio_mixer/audio_mixer_impl.h`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_mixer/audio_mixer_impl.h) + [`modules/audio_mixer/audio_mixer_impl.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_mixer/audio_mixer_impl.h) [`AudioMixer`](https://source.chromium.org/search?q=symbol:AudioMixer%20file:third_party%2Fwebrtc%2Fapi%2Faudio%2Faudio_mixer.h) is thread-safe. The output sample rate of the generated mix is automatically @@ -49,6 +49,6 @@ assigned depending on the sample rate of the sources; whereas the number of output channels is defined by the caller[^2]. Samples from the non-muted sources are summed up and then a limiter is used to apply soft-clipping when needed. -[^2]: [`audio/utility/channel_mixer.h`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/audio/utility/channel_mixer.h) +[^2]: [`audio/utility/channel_mixer.h`](https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/audio/utility/channel_mixer.h) is used to mix channels in the non-trivial cases - i.e., if the number of channels for a source or the mix is greater than 3. 
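The sum-then-limit behavior described in the closing paragraph can be pictured with a scalar sketch; note that plain saturation stands in here for the real soft-clipping limiter in the FrameCombiner path, so this is illustrative only:

// Sketch only: sum samples from non-muted sources, then clamp. The actual
// mixer applies a soft-clipping limiter rather than hard saturation.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

void MixAndLimit(const std::vector<const int16_t*>& sources,
                 size_t num_samples,
                 int16_t* out) {
  for (size_t i = 0; i < num_samples; ++i) {
    int32_t sum = 0;
    for (const int16_t* source : sources) {
      sum += source[i];
    }
    out[i] = static_cast<int16_t>(
        std::min<int32_t>(32767, std::max<int32_t>(-32768, sum)));
  }
}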
diff --git a/modules/audio_processing/BUILD.gn b/modules/audio_processing/BUILD.gn index a733612ccc..ee6b579617 100644 --- a/modules/audio_processing/BUILD.gn +++ b/modules/audio_processing/BUILD.gn @@ -19,15 +19,6 @@ config("apm_debug_dump") { } } -rtc_library("config") { - visibility = [ ":*" ] - sources = [ - "include/config.cc", - "include/config.h", - ] - deps = [ "../../rtc_base/system:rtc_export" ] -} - rtc_library("api") { visibility = [ "*" ] sources = [ @@ -37,7 +28,6 @@ rtc_library("api") { deps = [ ":audio_frame_view", ":audio_processing_statistics", - ":config", "../../api:array_view", "../../api:scoped_refptr", "../../api/audio:aec3_config", @@ -122,6 +112,33 @@ rtc_source_set("aec_dump_interface") { absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ] } +rtc_library("gain_controller2") { + configs += [ ":apm_debug_dump" ] + sources = [ + "gain_controller2.cc", + "gain_controller2.h", + ] + defines = [] + deps = [ + ":aec_dump_interface", + ":api", + ":apm_logging", + ":audio_buffer", + ":audio_frame_view", + "../../common_audio", + "../../rtc_base:atomicops", + "../../rtc_base:checks", + "../../rtc_base:logging", + "../../rtc_base:stringutils", + "../../system_wrappers:field_trial", + "agc2:adaptive_digital", + "agc2:cpu_features", + "agc2:fixed_digital", + "agc2:gain_applier", + "agc2:vad_wrapper", + ] +} + rtc_library("audio_processing") { visibility = [ "*" ] configs += [ ":apm_debug_dump" ] @@ -129,26 +146,11 @@ rtc_library("audio_processing") { "audio_processing_builder_impl.cc", "audio_processing_impl.cc", "audio_processing_impl.h", - "common.h", "echo_control_mobile_impl.cc", "echo_control_mobile_impl.h", - "echo_detector/circular_buffer.cc", - "echo_detector/circular_buffer.h", - "echo_detector/mean_variance_estimator.cc", - "echo_detector/mean_variance_estimator.h", - "echo_detector/moving_max.cc", - "echo_detector/moving_max.h", - "echo_detector/normalized_covariance_estimator.cc", - "echo_detector/normalized_covariance_estimator.h", "gain_control_impl.cc", "gain_control_impl.h", - "gain_controller2.cc", - "gain_controller2.h", - "level_estimator.cc", - "level_estimator.h", "render_queue_item_verifier.h", - "residual_echo_detector.cc", - "residual_echo_detector.h", "typing_detection.cc", "typing_detection.h", ] @@ -162,11 +164,10 @@ rtc_library("audio_processing") { ":audio_frame_proxies", ":audio_frame_view", ":audio_processing_statistics", - ":config", + ":gain_controller2", ":high_pass_filter", ":optionally_built_submodule_creators", ":rms_level", - ":voice_detection", "../../api:array_view", "../../api:function_view", "../../api/audio:aec3_config", @@ -184,17 +185,16 @@ rtc_library("audio_processing") { "../../rtc_base/synchronization:mutex", "../../rtc_base/system:rtc_export", "../../system_wrappers", + "../../system_wrappers:denormal_disabler", "../../system_wrappers:field_trial", "../../system_wrappers:metrics", "aec3", "aec_dump:aec_dump", "aecm:aecm_core", "agc", + "agc:analog_gain_stats_reporter", "agc:gain_control_interface", "agc:legacy_agc", - "agc2:adaptive_digital", - "agc2:fixed_digital", - "agc2:gain_applier", "capture_levels_adjuster", "ns", "transient:transient_suppressor_api", @@ -217,18 +217,31 @@ rtc_library("audio_processing") { } } -rtc_library("voice_detection") { +rtc_library("residual_echo_detector") { + poisonous = [ "default_echo_detector" ] + configs += [ ":apm_debug_dump" ] sources = [ - "voice_detection.cc", - "voice_detection.h", + "echo_detector/circular_buffer.cc", + "echo_detector/circular_buffer.h", + 
"echo_detector/mean_variance_estimator.cc", + "echo_detector/mean_variance_estimator.h", + "echo_detector/moving_max.cc", + "echo_detector/moving_max.h", + "echo_detector/normalized_covariance_estimator.cc", + "echo_detector/normalized_covariance_estimator.h", + "residual_echo_detector.cc", + "residual_echo_detector.h", ] deps = [ ":api", - ":audio_buffer", - "../../api/audio:audio_frame_api", - "../../common_audio:common_audio_c", + ":apm_logging", + "../../api:array_view", "../../rtc_base:checks", + "../../rtc_base:logging", + "../../rtc_base:rtc_base_approved", + "../../system_wrappers:metrics", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("optionally_built_submodule_creators") { @@ -334,7 +347,6 @@ if (rtc_include_tests) { sources = [ "audio_buffer_unittest.cc", "audio_frame_view_unittest.cc", - "config_unittest.cc", "echo_control_mobile_unittest.cc", "gain_controller2_unittest.cc", "splitting_filter_unittest.cc", @@ -349,14 +361,14 @@ if (rtc_include_tests) { ":audio_frame_view", ":audio_processing", ":audioproc_test_utils", - ":config", + ":gain_controller2", ":high_pass_filter", ":mocks", - ":voice_detection", "../../api:array_view", "../../api:scoped_refptr", "../../api/audio:aec3_config", "../../api/audio:aec3_factory", + "../../api/audio:echo_detector_creator", "../../common_audio", "../../common_audio:common_audio_c", "../../rtc_base", @@ -373,6 +385,7 @@ if (rtc_include_tests) { "../../rtc_base/system:arch", "../../rtc_base/system:file_wrapper", "../../system_wrappers", + "../../system_wrappers:denormal_disabler", "../../test:fileutils", "../../test:rtc_expect_death", "../../test:test_support", @@ -383,8 +396,8 @@ if (rtc_include_tests) { "agc2:biquad_filter_unittests", "agc2:fixed_digital_unittests", "agc2:noise_estimator_unittests", - "agc2:rnn_vad_with_level_unittests", "agc2:test_utils", + "agc2:vad_wrapper_unittests", "agc2/rnn_vad:unittests", "capture_levels_adjuster", "capture_levels_adjuster:capture_levels_adjuster_unittests", @@ -413,6 +426,7 @@ if (rtc_include_tests) { ":audioproc_test_utils", ":audioproc_unittest_proto", ":optionally_built_submodule_creators", + ":residual_echo_detector", ":rms_level", ":runtime_settings_protobuf_utils", "../../api/audio:audio_frame_api", @@ -434,7 +448,6 @@ if (rtc_include_tests) { "echo_detector/normalized_covariance_estimator_unittest.cc", "gain_control_unittest.cc", "high_pass_filter_unittest.cc", - "level_estimator_unittest.cc", "residual_echo_detector_unittest.cc", "rms_level_unittest.cc", "test/debug_dump_replayer.cc", @@ -445,7 +458,6 @@ if (rtc_include_tests) { "test/echo_canceller_test_tools_unittest.cc", "test/echo_control_mock.h", "test/test_utils.h", - "voice_detection_unittest.cc", ] } } @@ -513,6 +525,7 @@ if (rtc_include_tests) { ":runtime_settings_protobuf_utils", "../../api/audio:aec3_config_json", "../../api/audio:aec3_factory", + "../../api/audio:echo_detector_creator", "../../common_audio", "../../rtc_base:checks", "../../rtc_base:ignore_wundef", diff --git a/modules/audio_processing/aec3/adaptive_fir_filter.cc b/modules/audio_processing/aec3/adaptive_fir_filter.cc index bf3a7809f4..917aa951ee 100644 --- a/modules/audio_processing/aec3/adaptive_fir_filter.cc +++ b/modules/audio_processing/aec3/adaptive_fir_filter.cc @@ -68,19 +68,21 @@ void ComputeFrequencyResponse_Neon( RTC_DCHECK_EQ(H.size(), H2->capacity()); for (size_t p = 0; p < num_partitions; ++p) { RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size()); + auto& H2_p = (*H2)[p]; for (size_t ch = 0; ch < num_render_channels; 
++ch) { + const FftData& H_p_ch = H[p][ch]; for (size_t j = 0; j < kFftLengthBy2; j += 4) { - const float32x4_t re = vld1q_f32(&H[p][ch].re[j]); - const float32x4_t im = vld1q_f32(&H[p][ch].im[j]); + const float32x4_t re = vld1q_f32(&H_p_ch.re[j]); + const float32x4_t im = vld1q_f32(&H_p_ch.im[j]); float32x4_t H2_new = vmulq_f32(re, re); H2_new = vmlaq_f32(H2_new, im, im); - float32x4_t H2_p_j = vld1q_f32(&(*H2)[p][j]); + float32x4_t H2_p_j = vld1q_f32(&H2_p[j]); H2_p_j = vmaxq_f32(H2_p_j, H2_new); - vst1q_f32(&(*H2)[p][j], H2_p_j); + vst1q_f32(&H2_p[j], H2_p_j); } - float H2_new = H[p][ch].re[kFftLengthBy2] * H[p][ch].re[kFftLengthBy2] + - H[p][ch].im[kFftLengthBy2] * H[p][ch].im[kFftLengthBy2]; - (*H2)[p][kFftLengthBy2] = std::max((*H2)[p][kFftLengthBy2], H2_new); + float H2_new = H_p_ch.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] + + H_p_ch.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2]; + H2_p[kFftLengthBy2] = std::max(H2_p[kFftLengthBy2], H2_new); } } } @@ -101,20 +103,22 @@ void ComputeFrequencyResponse_Sse2( // constexpr __mmmask8 kMaxMask = static_cast<__mmmask8>(256u); for (size_t p = 0; p < num_partitions; ++p) { RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size()); + auto& H2_p = (*H2)[p]; for (size_t ch = 0; ch < num_render_channels; ++ch) { + const FftData& H_p_ch = H[p][ch]; for (size_t j = 0; j < kFftLengthBy2; j += 4) { - const __m128 re = _mm_loadu_ps(&H[p][ch].re[j]); + const __m128 re = _mm_loadu_ps(&H_p_ch.re[j]); const __m128 re2 = _mm_mul_ps(re, re); - const __m128 im = _mm_loadu_ps(&H[p][ch].im[j]); + const __m128 im = _mm_loadu_ps(&H_p_ch.im[j]); const __m128 im2 = _mm_mul_ps(im, im); const __m128 H2_new = _mm_add_ps(re2, im2); - __m128 H2_k_j = _mm_loadu_ps(&(*H2)[p][j]); + __m128 H2_k_j = _mm_loadu_ps(&H2_p[j]); H2_k_j = _mm_max_ps(H2_k_j, H2_new); - _mm_storeu_ps(&(*H2)[p][j], H2_k_j); + _mm_storeu_ps(&H2_p[j], H2_k_j); } - float H2_new = H[p][ch].re[kFftLengthBy2] * H[p][ch].re[kFftLengthBy2] + - H[p][ch].im[kFftLengthBy2] * H[p][ch].im[kFftLengthBy2]; - (*H2)[p][kFftLengthBy2] = std::max((*H2)[p][kFftLengthBy2], H2_new); + float H2_new = H_p_ch.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] + + H_p_ch.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2]; + H2_p[kFftLengthBy2] = std::max(H2_p[kFftLengthBy2], H2_new); } } } diff --git a/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc b/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc index 245b45ac31..6c8c948026 100644 --- a/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc +++ b/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc @@ -31,19 +31,21 @@ void ComputeFrequencyResponse_Avx2( RTC_DCHECK_EQ(H.size(), H2->capacity()); for (size_t p = 0; p < num_partitions; ++p) { RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size()); + auto& H2_p = (*H2)[p]; for (size_t ch = 0; ch < num_render_channels; ++ch) { + const FftData& H_p_ch = H[p][ch]; for (size_t j = 0; j < kFftLengthBy2; j += 8) { - __m256 re = _mm256_loadu_ps(&H[p][ch].re[j]); + __m256 re = _mm256_loadu_ps(&H_p_ch.re[j]); __m256 re2 = _mm256_mul_ps(re, re); - __m256 im = _mm256_loadu_ps(&H[p][ch].im[j]); + __m256 im = _mm256_loadu_ps(&H_p_ch.im[j]); re2 = _mm256_fmadd_ps(im, im, re2); - __m256 H2_k_j = _mm256_loadu_ps(&(*H2)[p][j]); + __m256 H2_k_j = _mm256_loadu_ps(&H2_p[j]); H2_k_j = _mm256_max_ps(H2_k_j, re2); - _mm256_storeu_ps(&(*H2)[p][j], H2_k_j); + _mm256_storeu_ps(&H2_p[j], H2_k_j); } - float H2_new = H[p][ch].re[kFftLengthBy2] * H[p][ch].re[kFftLengthBy2] + - H[p][ch].im[kFftLengthBy2] * H[p][ch].im[kFftLengthBy2]; - 
(*H2)[p][kFftLengthBy2] = std::max((*H2)[p][kFftLengthBy2], H2_new); + float H2_new = H_p_ch.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] + + H_p_ch.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2]; + H2_p[kFftLengthBy2] = std::max(H2_p[kFftLengthBy2], H2_new); } } } diff --git a/modules/audio_processing/aec3/aec3_common.cc b/modules/audio_processing/aec3/aec3_common.cc index 7bd8d6267a..3ba10d5baf 100644 --- a/modules/audio_processing/aec3/aec3_common.cc +++ b/modules/audio_processing/aec3/aec3_common.cc @@ -29,9 +29,9 @@ Aec3Optimization DetectOptimization() { #if defined(WEBRTC_HAS_NEON) return Aec3Optimization::kNeon; -#endif - +#else return Aec3Optimization::kNone; +#endif } float FastApproxLog2f(const float in) { diff --git a/modules/audio_processing/aec3/aec3_common.h b/modules/audio_processing/aec3/aec3_common.h index 3bfff967a0..32b564f14b 100644 --- a/modules/audio_processing/aec3/aec3_common.h +++ b/modules/audio_processing/aec3/aec3_common.h @@ -85,10 +85,10 @@ constexpr size_t GetRenderDelayBufferSize(size_t down_sampling_factor, Aec3Optimization DetectOptimization(); // Computes the log2 of the input in a fast an approximate manner. -float FastApproxLog2f(const float in); +float FastApproxLog2f(float in); // Returns dB from a power quantity expressed in log2. -float Log2TodB(const float in_log2); +float Log2TodB(float in_log2); static_assert(1 << kBlockSizeLog2 == kBlockSize, "Proper number of shifts for blocksize"); diff --git a/modules/audio_processing/aec3/aec3_fft.cc b/modules/audio_processing/aec3/aec3_fft.cc index 8dfa183367..9cc8016f0b 100644 --- a/modules/audio_processing/aec3/aec3_fft.cc +++ b/modules/audio_processing/aec3/aec3_fft.cc @@ -101,10 +101,10 @@ void Aec3Fft::ZeroPaddedFft(rtc::ArrayView x, [](float a, float b) { return a * b; }); break; case Window::kSqrtHanning: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } Fft(&fft, X); @@ -125,7 +125,7 @@ void Aec3Fft::PaddedFft(rtc::ArrayView x, std::copy(x.begin(), x.end(), fft.begin() + x_old.size()); break; case Window::kHanning: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; case Window::kSqrtHanning: std::transform(x_old.begin(), x_old.end(), std::begin(kSqrtHanning128), @@ -135,7 +135,7 @@ void Aec3Fft::PaddedFft(rtc::ArrayView x, fft.begin() + x_old.size(), std::multiplies()); break; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } Fft(&fft, X); diff --git a/modules/audio_processing/aec3/aec3_fft.h b/modules/audio_processing/aec3/aec3_fft.h index 6f7fbe4d0e..c68de53963 100644 --- a/modules/audio_processing/aec3/aec3_fft.h +++ b/modules/audio_processing/aec3/aec3_fft.h @@ -18,7 +18,6 @@ #include "modules/audio_processing/aec3/aec3_common.h" #include "modules/audio_processing/aec3/fft_data.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -30,6 +29,9 @@ class Aec3Fft { Aec3Fft(); + Aec3Fft(const Aec3Fft&) = delete; + Aec3Fft& operator=(const Aec3Fft&) = delete; + // Computes the FFT. Note that both the input and output are modified. 
void Fft(std::array* x, FftData* X) const { RTC_DCHECK(x); @@ -66,8 +68,6 @@ class Aec3Fft { private: const OouraFft ooura_fft_; - - RTC_DISALLOW_COPY_AND_ASSIGN(Aec3Fft); }; } // namespace webrtc diff --git a/modules/audio_processing/aec3/aec_state.cc b/modules/audio_processing/aec3/aec_state.cc index 21cad2186f..b09acfd892 100644 --- a/modules/audio_processing/aec3/aec_state.cc +++ b/modules/audio_processing/aec3/aec_state.cc @@ -229,8 +229,9 @@ void AecState::Update( std::array avg_render_spectrum_with_reverb; ComputeAvgRenderReverb(render_buffer.GetSpectrumBuffer(), - delay_state_.MinDirectPathFilterDelay(), ReverbDecay(), - &avg_render_reverb_, avg_render_spectrum_with_reverb); + delay_state_.MinDirectPathFilterDelay(), + ReverbDecay(/*mild=*/false), &avg_render_reverb_, + avg_render_spectrum_with_reverb); if (config_.echo_audibility.use_stationarity_properties) { // Update the echo audibility evaluator. diff --git a/modules/audio_processing/aec3/aec_state.h b/modules/audio_processing/aec3/aec_state.h index e2f70a4c68..5994465688 100644 --- a/modules/audio_processing/aec3/aec_state.h +++ b/modules/audio_processing/aec3/aec_state.h @@ -116,8 +116,12 @@ class AecState { // Takes appropriate action at an echo path change. void HandleEchoPathChange(const EchoPathVariability& echo_path_variability); - // Returns the decay factor for the echo reverberation. - float ReverbDecay() const { return reverb_model_estimator_.ReverbDecay(); } + // Returns the decay factor for the echo reverberation. The parameter `mild` + // indicates which exponential decay to return. The default one or a milder + // one that can be used during nearend regions. + float ReverbDecay(bool mild) const { + return reverb_model_estimator_.ReverbDecay(mild); + } // Return the frequency response of the reverberant echo. rtc::ArrayView GetReverbFrequencyResponse() const { diff --git a/modules/audio_processing/aec3/alignment_mixer.h b/modules/audio_processing/aec3/alignment_mixer.h index 682aec9124..aa1830fc03 100644 --- a/modules/audio_processing/aec3/alignment_mixer.h +++ b/modules/audio_processing/aec3/alignment_mixer.h @@ -49,7 +49,7 @@ class AlignmentMixer { int selected_channel_ = 0; size_t block_counter_ = 0; - void Downmix(const rtc::ArrayView> x, + void Downmix(rtc::ArrayView> x, rtc::ArrayView y) const; int SelectChannel(rtc::ArrayView> x); }; diff --git a/modules/audio_processing/aec3/block_delay_buffer.cc b/modules/audio_processing/aec3/block_delay_buffer.cc index b9eb3c9f93..059bbafcdb 100644 --- a/modules/audio_processing/aec3/block_delay_buffer.cc +++ b/modules/audio_processing/aec3/block_delay_buffer.cc @@ -41,17 +41,24 @@ void BlockDelayBuffer::DelaySignal(AudioBuffer* frame) { RTC_DCHECK_EQ(buf_[ch].size(), frame->num_bands()); RTC_DCHECK_EQ(buf_[ch].size(), num_bands); rtc::ArrayView frame_ch(frame->split_bands(ch), num_bands); + const size_t delay = delay_; for (size_t band = 0; band < num_bands; ++band) { RTC_DCHECK_EQ(delay_, buf_[ch][band].size()); i = i_start; - for (size_t k = 0; k < frame_length_; ++k) { - const float tmp = buf_[ch][band][i]; - buf_[ch][band][i] = frame_ch[band][k]; - frame_ch[band][k] = tmp; + // Offloading these pointers and class variables to local variables allows + // the compiler to optimize the below loop when compiling with + // '-fno-strict-aliasing'. + float* buf_ch_band = buf_[ch][band].data(); + float* frame_ch_band = frame_ch[band]; - i = i < delay_ - 1 ? 
i + 1 : 0; + for (size_t k = 0, frame_length = frame_length_; k < frame_length; ++k) { + const float tmp = buf_ch_band[i]; + buf_ch_band[i] = frame_ch_band[k]; + frame_ch_band[k] = tmp; + + i = i < delay - 1 ? i + 1 : 0; } } } diff --git a/modules/audio_processing/aec3/block_processor_metrics.h b/modules/audio_processing/aec3/block_processor_metrics.h index 4ba053683b..a70d0dac5b 100644 --- a/modules/audio_processing/aec3/block_processor_metrics.h +++ b/modules/audio_processing/aec3/block_processor_metrics.h @@ -11,8 +11,6 @@ #ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_ #define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_ -#include "rtc_base/constructor_magic.h" - namespace webrtc { // Handles the reporting of metrics for the block_processor. @@ -20,6 +18,9 @@ class BlockProcessorMetrics { public: BlockProcessorMetrics() = default; + BlockProcessorMetrics(const BlockProcessorMetrics&) = delete; + BlockProcessorMetrics& operator=(const BlockProcessorMetrics&) = delete; + // Updates the metric with new capture data. void UpdateCapture(bool underrun); @@ -38,8 +39,6 @@ class BlockProcessorMetrics { int render_buffer_underruns_ = 0; int render_buffer_overruns_ = 0; int buffer_render_calls_ = 0; - - RTC_DISALLOW_COPY_AND_ASSIGN(BlockProcessorMetrics); }; } // namespace webrtc diff --git a/modules/audio_processing/aec3/comfort_noise_generator.h b/modules/audio_processing/aec3/comfort_noise_generator.h index 16eaf3550f..2785b765c5 100644 --- a/modules/audio_processing/aec3/comfort_noise_generator.h +++ b/modules/audio_processing/aec3/comfort_noise_generator.h @@ -19,7 +19,6 @@ #include "modules/audio_processing/aec3/aec3_common.h" #include "modules/audio_processing/aec3/aec_state.h" #include "modules/audio_processing/aec3/fft_data.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/system/arch.h" namespace webrtc { diff --git a/modules/audio_processing/aec3/decimator.h b/modules/audio_processing/aec3/decimator.h index 3ccd292f08..dbff3d9fff 100644 --- a/modules/audio_processing/aec3/decimator.h +++ b/modules/audio_processing/aec3/decimator.h @@ -17,7 +17,6 @@ #include "api/array_view.h" #include "modules/audio_processing/aec3/aec3_common.h" #include "modules/audio_processing/utility/cascaded_biquad_filter.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -26,6 +25,9 @@ class Decimator { public: explicit Decimator(size_t down_sampling_factor); + Decimator(const Decimator&) = delete; + Decimator& operator=(const Decimator&) = delete; + // Downsamples the signal. 
void Decimate(rtc::ArrayView in, rtc::ArrayView out); @@ -33,8 +35,6 @@ class Decimator { const size_t down_sampling_factor_; CascadedBiQuadFilter anti_aliasing_filter_; CascadedBiQuadFilter noise_reduction_filter_; - - RTC_DISALLOW_COPY_AND_ASSIGN(Decimator); }; } // namespace webrtc diff --git a/modules/audio_processing/aec3/echo_audibility.h b/modules/audio_processing/aec3/echo_audibility.h index 1ffc017b7d..b9d6f87d2a 100644 --- a/modules/audio_processing/aec3/echo_audibility.h +++ b/modules/audio_processing/aec3/echo_audibility.h @@ -19,7 +19,6 @@ #include "modules/audio_processing/aec3/render_buffer.h" #include "modules/audio_processing/aec3/spectrum_buffer.h" #include "modules/audio_processing/aec3/stationarity_estimator.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { diff --git a/modules/audio_processing/aec3/echo_canceller3.cc b/modules/audio_processing/aec3/echo_canceller3.cc index 181b649f6d..419a056d20 100644 --- a/modules/audio_processing/aec3/echo_canceller3.cc +++ b/modules/audio_processing/aec3/echo_canceller3.cc @@ -27,8 +27,8 @@ namespace { enum class EchoCanceller3ApiCall { kCapture, kRender }; bool DetectSaturation(rtc::ArrayView y) { - for (auto y_k : y) { - if (y_k >= 32700.0f || y_k <= -32700.0f) { + for (size_t k = 0; k < y.size(); ++k) { + if (y[k] >= 32700.0f || y[k] <= -32700.0f) { return true; } } @@ -267,22 +267,34 @@ EchoCanceller3Config AdjustConfig(const EchoCanceller3Config& config) { adjusted_cfg.ep_strength.echo_can_saturate = false; } - if (field_trial::IsEnabled("WebRTC-Aec3UseDot2ReverbDefaultLen")) { - adjusted_cfg.ep_strength.default_len = 0.2f; - } else if (field_trial::IsEnabled("WebRTC-Aec3UseDot3ReverbDefaultLen")) { - adjusted_cfg.ep_strength.default_len = 0.3f; - } else if (field_trial::IsEnabled("WebRTC-Aec3UseDot4ReverbDefaultLen")) { - adjusted_cfg.ep_strength.default_len = 0.4f; - } else if (field_trial::IsEnabled("WebRTC-Aec3UseDot5ReverbDefaultLen")) { - adjusted_cfg.ep_strength.default_len = 0.5f; - } else if (field_trial::IsEnabled("WebRTC-Aec3UseDot6ReverbDefaultLen")) { - adjusted_cfg.ep_strength.default_len = 0.6f; - } else if (field_trial::IsEnabled("WebRTC-Aec3UseDot7ReverbDefaultLen")) { - adjusted_cfg.ep_strength.default_len = 0.7f; - } else if (field_trial::IsEnabled("WebRTC-Aec3UseDot8ReverbDefaultLen")) { - adjusted_cfg.ep_strength.default_len = 0.8f; + const std::string use_nearend_reverb_len_tunings = + field_trial::FindFullName("WebRTC-Aec3UseNearendReverbLen"); + FieldTrialParameter nearend_reverb_default_len( + "default_len", adjusted_cfg.ep_strength.default_len); + FieldTrialParameter nearend_reverb_nearend_len( + "nearend_len", adjusted_cfg.ep_strength.nearend_len); + + ParseFieldTrial({&nearend_reverb_default_len, &nearend_reverb_nearend_len}, + use_nearend_reverb_len_tunings); + float default_len = static_cast(nearend_reverb_default_len.Get()); + float nearend_len = static_cast(nearend_reverb_nearend_len.Get()); + if (default_len > -1 && default_len < 1 && nearend_len > -1 && + nearend_len < 1) { + adjusted_cfg.ep_strength.default_len = + static_cast(nearend_reverb_default_len.Get()); + adjusted_cfg.ep_strength.nearend_len = + static_cast(nearend_reverb_nearend_len.Get()); + } + + if (field_trial::IsEnabled("WebRTC-Aec3ConservativeTailFreqResponse")) { + adjusted_cfg.ep_strength.use_conservative_tail_frequency_response = true; + } + + if (field_trial::IsDisabled("WebRTC-Aec3ConservativeTailFreqResponse")) { + adjusted_cfg.ep_strength.use_conservative_tail_frequency_response = false; } + if 
(field_trial::IsEnabled("WebRTC-Aec3ShortHeadroomKillSwitch")) { // Two blocks headroom. adjusted_cfg.delay.delay_headroom_samples = kBlockSize * 2; @@ -459,8 +471,6 @@ EchoCanceller3Config AdjustConfig(const EchoCanceller3Config& config) { FieldTrialParameter dominant_nearend_detection_trigger_threshold( "dominant_nearend_detection_trigger_threshold", adjusted_cfg.suppressor.dominant_nearend_detection.trigger_threshold); - FieldTrialParameter ep_strength_default_len( - "ep_strength_default_len", adjusted_cfg.ep_strength.default_len); ParseFieldTrial( {&nearend_tuning_mask_lf_enr_transparent, @@ -477,7 +487,7 @@ EchoCanceller3Config AdjustConfig(const EchoCanceller3Config& config) { &dominant_nearend_detection_enr_exit_threshold, &dominant_nearend_detection_snr_threshold, &dominant_nearend_detection_hold_duration, - &dominant_nearend_detection_trigger_threshold, &ep_strength_default_len}, + &dominant_nearend_detection_trigger_threshold}, suppressor_tuning_override_trial_name); adjusted_cfg.suppressor.nearend_tuning.mask_lf.enr_transparent = @@ -514,8 +524,6 @@ EchoCanceller3Config AdjustConfig(const EchoCanceller3Config& config) { dominant_nearend_detection_hold_duration.Get(); adjusted_cfg.suppressor.dominant_nearend_detection.trigger_threshold = dominant_nearend_detection_trigger_threshold.Get(); - adjusted_cfg.ep_strength.default_len = - static_cast(ep_strength_default_len.Get()); // Field trial-based overrides of individual suppressor parameters. RetrieveFieldTrialValue( @@ -577,15 +585,13 @@ EchoCanceller3Config AdjustConfig(const EchoCanceller3Config& config) { "WebRTC-Aec3SuppressorAntiHowlingGainOverride", 0.f, 10.f, &adjusted_cfg.suppressor.high_bands_suppression.anti_howling_gain); - RetrieveFieldTrialValue("WebRTC-Aec3SuppressorEpStrengthDefaultLenOverride", - -1.f, 1.f, &adjusted_cfg.ep_strength.default_len); - // Field trial-based overrides of individual delay estimator parameters. RetrieveFieldTrialValue("WebRTC-Aec3DelayEstimateSmoothingOverride", 0.f, 1.f, &adjusted_cfg.delay.delay_estimate_smoothing); RetrieveFieldTrialValue( "WebRTC-Aec3DelayEstimateSmoothingDelayFoundOverride", 0.f, 1.f, &adjusted_cfg.delay.delay_estimate_smoothing_delay_found); + return adjusted_cfg; } @@ -796,7 +802,7 @@ void EchoCanceller3::ProcessCapture(AudioBuffer* capture, if (linear_output && !linear_output_framer_) { RTC_LOG(LS_ERROR) << "Trying to retrieve the linear AEC output without " "properly configuring AEC3."; - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } // Report capture call in the metrics and periodically update API call diff --git a/modules/audio_processing/aec3/echo_canceller3_unittest.cc b/modules/audio_processing/aec3/echo_canceller3_unittest.cc index 4a3c466712..b405e0ca61 100644 --- a/modules/audio_processing/aec3/echo_canceller3_unittest.cc +++ b/modules/audio_processing/aec3/echo_canceller3_unittest.cc @@ -697,23 +697,6 @@ TEST(EchoCanceller3Messaging, EchoLeakage) { } } -// Tests the parameter functionality for the field trial override for the -// default_len parameter. 
-TEST(EchoCanceller3FieldTrials, Aec3SuppressorEpStrengthDefaultLenOverride) { - EchoCanceller3Config default_config; - EchoCanceller3Config adjusted_config = AdjustConfig(default_config); - ASSERT_EQ(default_config.ep_strength.default_len, - adjusted_config.ep_strength.default_len); - - webrtc::test::ScopedFieldTrials field_trials( - "WebRTC-Aec3SuppressorEpStrengthDefaultLenOverride/-0.02/"); - adjusted_config = AdjustConfig(default_config); - - ASSERT_NE(default_config.ep_strength.default_len, - adjusted_config.ep_strength.default_len); - EXPECT_FLOAT_EQ(-0.02f, adjusted_config.ep_strength.default_len); -} - // Tests the parameter functionality for the field trial override for the // anti-howling gain. TEST(EchoCanceller3FieldTrials, Aec3SuppressorAntiHowlingGainOverride) { @@ -767,7 +750,7 @@ TEST(EchoCanceller3FieldTrials, Aec3SuppressorTuningOverrideAllParams) { "detection_enr_threshold:1.3,dominant_nearend_detection_enr_exit_" "threshold:1.4,dominant_nearend_detection_snr_threshold:1.5,dominant_" "nearend_detection_hold_duration:10,dominant_nearend_detection_trigger_" - "threshold:11,ep_strength_default_len:1.6/"); + "threshold:11/"); EchoCanceller3Config default_config; EchoCanceller3Config adjusted_config = AdjustConfig(default_config); @@ -808,8 +791,6 @@ TEST(EchoCanceller3FieldTrials, Aec3SuppressorTuningOverrideAllParams) { ASSERT_NE( adjusted_config.suppressor.dominant_nearend_detection.trigger_threshold, default_config.suppressor.dominant_nearend_detection.trigger_threshold); - ASSERT_NE(adjusted_config.ep_strength.default_len, - default_config.ep_strength.default_len); EXPECT_FLOAT_EQ( adjusted_config.suppressor.nearend_tuning.mask_lf.enr_transparent, 0.1); @@ -846,7 +827,6 @@ TEST(EchoCanceller3FieldTrials, Aec3SuppressorTuningOverrideAllParams) { EXPECT_EQ( adjusted_config.suppressor.dominant_nearend_detection.trigger_threshold, 11); - EXPECT_FLOAT_EQ(adjusted_config.ep_strength.default_len, 1.6); } // Testing the field trial-based override of the suppressor parameters for @@ -900,6 +880,16 @@ TEST(EchoCanceller3FieldTrials, Aec3SuppressorTuningOverrideOneParam) { 0.5); } +// Testing the field trial-based that override the exponential decay parameters. 
+TEST(EchoCanceller3FieldTrials, Aec3UseNearendReverb) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-Aec3UseNearendReverbLen/default_len:0.9,nearend_len:0.8/"); + EchoCanceller3Config default_config; + EchoCanceller3Config adjusted_config = AdjustConfig(default_config); + EXPECT_FLOAT_EQ(adjusted_config.ep_strength.default_len, 0.9); + EXPECT_FLOAT_EQ(adjusted_config.ep_strength.nearend_len, 0.8); +} + #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) TEST(EchoCanceller3InputCheckDeathTest, WrongCaptureNumBandsCheckVerification) { diff --git a/modules/audio_processing/aec3/echo_path_delay_estimator.h b/modules/audio_processing/aec3/echo_path_delay_estimator.h index 6c8c21282e..d8f97757bb 100644 --- a/modules/audio_processing/aec3/echo_path_delay_estimator.h +++ b/modules/audio_processing/aec3/echo_path_delay_estimator.h @@ -21,7 +21,6 @@ #include "modules/audio_processing/aec3/delay_estimate.h" #include "modules/audio_processing/aec3/matched_filter.h" #include "modules/audio_processing/aec3/matched_filter_lag_aggregator.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -37,6 +36,9 @@ class EchoPathDelayEstimator { size_t num_capture_channels); ~EchoPathDelayEstimator(); + EchoPathDelayEstimator(const EchoPathDelayEstimator&) = delete; + EchoPathDelayEstimator& operator=(const EchoPathDelayEstimator&) = delete; + // Resets the estimation. If the delay confidence is reset, the reset behavior // is as if the call is restarted. void Reset(bool reset_delay_confidence); @@ -71,8 +73,6 @@ class EchoPathDelayEstimator { // Internal reset method with more granularity. void Reset(bool reset_lag_aggregator, bool reset_delay_confidence); - - RTC_DISALLOW_COPY_AND_ASSIGN(EchoPathDelayEstimator); }; } // namespace webrtc diff --git a/modules/audio_processing/aec3/echo_remover_metrics.cc b/modules/audio_processing/aec3/echo_remover_metrics.cc index 1ceb329d38..c3fc80773a 100644 --- a/modules/audio_processing/aec3/echo_remover_metrics.cc +++ b/modules/audio_processing/aec3/echo_remover_metrics.cc @@ -113,7 +113,7 @@ void EchoRemoverMetrics::Update( ResetMetrics(); break; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; } } diff --git a/modules/audio_processing/aec3/echo_remover_metrics.h b/modules/audio_processing/aec3/echo_remover_metrics.h index c3d8e20da1..aec8084d78 100644 --- a/modules/audio_processing/aec3/echo_remover_metrics.h +++ b/modules/audio_processing/aec3/echo_remover_metrics.h @@ -15,7 +15,6 @@ #include "modules/audio_processing/aec3/aec3_common.h" #include "modules/audio_processing/aec3/aec_state.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -34,6 +33,9 @@ class EchoRemoverMetrics { EchoRemoverMetrics(); + EchoRemoverMetrics(const EchoRemoverMetrics&) = delete; + EchoRemoverMetrics& operator=(const EchoRemoverMetrics&) = delete; + // Updates the metric with new data. 
void Update( const AecState& aec_state, @@ -52,8 +54,6 @@ class EchoRemoverMetrics { DbMetric erle_time_domain_; bool saturated_capture_ = false; bool metrics_reported_ = false; - - RTC_DISALLOW_COPY_AND_ASSIGN(EchoRemoverMetrics); }; namespace aec3 { diff --git a/modules/audio_processing/aec3/erl_estimator.h b/modules/audio_processing/aec3/erl_estimator.h index 89bf6ace36..639a52c561 100644 --- a/modules/audio_processing/aec3/erl_estimator.h +++ b/modules/audio_processing/aec3/erl_estimator.h @@ -18,7 +18,6 @@ #include "api/array_view.h" #include "modules/audio_processing/aec3/aec3_common.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -28,6 +27,9 @@ class ErlEstimator { explicit ErlEstimator(size_t startup_phase_length_blocks_); ~ErlEstimator(); + ErlEstimator(const ErlEstimator&) = delete; + ErlEstimator& operator=(const ErlEstimator&) = delete; + // Resets the ERL estimation. void Reset(); @@ -49,7 +51,6 @@ class ErlEstimator { float erl_time_domain_; int hold_counter_time_domain_; size_t blocks_since_reset_ = 0; - RTC_DISALLOW_COPY_AND_ASSIGN(ErlEstimator); }; } // namespace webrtc diff --git a/modules/audio_processing/aec3/filter_analyzer.cc b/modules/audio_processing/aec3/filter_analyzer.cc index be954d3a18..c07e5c8648 100644 --- a/modules/audio_processing/aec3/filter_analyzer.cc +++ b/modules/audio_processing/aec3/filter_analyzer.cc @@ -170,11 +170,16 @@ void FilterAnalyzer::PreProcessFilters( std::fill(h_highpass_[ch].begin() + region_.start_sample_, h_highpass_[ch].begin() + region_.end_sample_ + 1, 0.f); + float* h_highpass_ch = h_highpass_[ch].data(); + const float* filters_time_domain_ch = filters_time_domain[ch].data(); + const size_t region_end = region_.end_sample_; for (size_t k = std::max(h.size() - 1, region_.start_sample_); - k <= region_.end_sample_; ++k) { + k <= region_end; ++k) { + float tmp = h_highpass_ch[k]; for (size_t j = 0; j < h.size(); ++j) { - h_highpass_[ch][k] += filters_time_domain[ch][k - j] * h[j]; + tmp += filters_time_domain_ch[k - j] * h[j]; } + h_highpass_ch[k] = tmp; } } } @@ -230,19 +235,23 @@ bool FilterAnalyzer::ConsistentFilterDetector::Detect( peak_index > filter_to_analyze.size() - 129 ? 
0 : peak_index + 128; } + float filter_floor_accum = filter_floor_accum_; + float filter_secondary_peak = filter_secondary_peak_; for (size_t k = region.start_sample_; k < std::min(region.end_sample_ + 1, filter_floor_low_limit_); ++k) { float abs_h = fabsf(filter_to_analyze[k]); - filter_floor_accum_ += abs_h; - filter_secondary_peak_ = std::max(filter_secondary_peak_, abs_h); + filter_floor_accum += abs_h; + filter_secondary_peak = std::max(filter_secondary_peak, abs_h); } for (size_t k = std::max(filter_floor_high_limit_, region.start_sample_); k <= region.end_sample_; ++k) { float abs_h = fabsf(filter_to_analyze[k]); - filter_floor_accum_ += abs_h; - filter_secondary_peak_ = std::max(filter_secondary_peak_, abs_h); + filter_floor_accum += abs_h; + filter_secondary_peak = std::max(filter_secondary_peak, abs_h); } + filter_floor_accum_ = filter_floor_accum; + filter_secondary_peak_ = filter_secondary_peak; if (region.end_sample_ == filter_to_analyze.size() - 1) { float filter_floor = filter_floor_accum_ / diff --git a/modules/audio_processing/aec3/filter_analyzer.h b/modules/audio_processing/aec3/filter_analyzer.h index b0b7070119..e05fb71138 100644 --- a/modules/audio_processing/aec3/filter_analyzer.h +++ b/modules/audio_processing/aec3/filter_analyzer.h @@ -20,7 +20,6 @@ #include "api/array_view.h" #include "api/audio/echo_canceller3_config.h" #include "modules/audio_processing/aec3/aec3_common.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { diff --git a/modules/audio_processing/aec3/fullband_erle_estimator.h b/modules/audio_processing/aec3/fullband_erle_estimator.h index 2b720a4de4..7a082176d6 100644 --- a/modules/audio_processing/aec3/fullband_erle_estimator.h +++ b/modules/audio_processing/aec3/fullband_erle_estimator.h @@ -67,7 +67,7 @@ class FullBandErleEstimator { // Updates the estimator with a new point, returns true // if the instantaneous ERLE was updated due to having enough // points for performing the estimate. - bool Update(const float Y2_sum, const float E2_sum); + bool Update(float Y2_sum, float E2_sum); // Resets the instantaneous ERLE estimator to its initial state. void Reset(); // Resets the members related with an instantaneous estimate. diff --git a/modules/audio_processing/aec3/matched_filter.cc b/modules/audio_processing/aec3/matched_filter.cc index 1721e9c983..faca933856 100644 --- a/modules/audio_processing/aec3/matched_filter.cc +++ b/modules/audio_processing/aec3/matched_filter.cc @@ -166,7 +166,9 @@ void MatchedFilterCore_SSE2(size_t x_start_index, // Initialize values for the accumulation. __m128 s_128 = _mm_set1_ps(0); + __m128 s_128_4 = _mm_set1_ps(0); __m128 x2_sum_128 = _mm_set1_ps(0); + __m128 x2_sum_128_4 = _mm_set1_ps(0); float x2_sum = 0.f; float s = 0; @@ -179,20 +181,26 @@ void MatchedFilterCore_SSE2(size_t x_start_index, const int chunk2 = h_size - chunk1; for (int limit : {chunk1, chunk2}) { // Perform 128 bit vector operations. - const int limit_by_4 = limit >> 2; - for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) { + const int limit_by_8 = limit >> 3; + for (int k = limit_by_8; k > 0; --k, h_p += 8, x_p += 8) { // Load the data into 128 bit vectors. const __m128 x_k = _mm_loadu_ps(x_p); const __m128 h_k = _mm_loadu_ps(h_p); + const __m128 x_k_4 = _mm_loadu_ps(x_p + 4); + const __m128 h_k_4 = _mm_loadu_ps(h_p + 4); const __m128 xx = _mm_mul_ps(x_k, x_k); + const __m128 xx_4 = _mm_mul_ps(x_k_4, x_k_4); // Compute and accumulate x * x and h * x. 
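The SSE2 kernel above is widened from one to two vector accumulators per iteration (s_128/s_128_4 and x2_sum_128/x2_sum_128_4). The second accumulator breaks the loop-carried dependency on a single register, so consecutive additions can overlap in the pipeline. A scalar sketch of the same idea, for illustration only, not the production kernel:

// Two-accumulator dot product: sum0 and sum1 are independent, so their
// additions can execute in parallel; they are combined once at the end.
float DotProductTwoAccumulators(const float* a, const float* b, int n) {
  float sum0 = 0.f;
  float sum1 = 0.f;
  int k = 0;
  for (; k + 1 < n; k += 2) {
    sum0 += a[k] * b[k];
    sum1 += a[k + 1] * b[k + 1];
  }
  if (k < n) {
    sum0 += a[k] * b[k];  // Odd-length tail.
  }
  return sum0 + sum1;
}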
       x2_sum_128 = _mm_add_ps(x2_sum_128, xx);
+        x2_sum_128_4 = _mm_add_ps(x2_sum_128_4, xx_4);
         const __m128 hx = _mm_mul_ps(h_k, x_k);
+        const __m128 hx_4 = _mm_mul_ps(h_k_4, x_k_4);
         s_128 = _mm_add_ps(s_128, hx);
+        s_128_4 = _mm_add_ps(s_128_4, hx_4);
       }
       // Perform non-vector operations for any remaining items.
-      for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) {
+      for (int k = limit - limit_by_8 * 8; k > 0; --k, ++h_p, ++x_p) {
         const float x_k = *x_p;
         x2_sum += x_k * x_k;
         s += *h_p * x_k;
@@ -202,8 +210,10 @@ void MatchedFilterCore_SSE2(size_t x_start_index,
   }
 
   // Combine the accumulated vector and scalar values.
+  x2_sum_128 = _mm_add_ps(x2_sum_128, x2_sum_128_4);
   float* v = reinterpret_cast<float*>(&x2_sum_128);
   x2_sum += v[0] + v[1] + v[2] + v[3];
+  s_128 = _mm_add_ps(s_128, s_128_4);
   v = reinterpret_cast<float*>(&s_128);
   s += v[0] + v[1] + v[2] + v[3];
@@ -298,6 +308,41 @@ void MatchedFilterCore(size_t x_start_index,
   }
 }
 
+size_t MaxSquarePeakIndex(rtc::ArrayView<const float> h) {
+  if (h.size() < 2) {
+    return 0;
+  }
+  float max_element1 = h[0] * h[0];
+  float max_element2 = h[1] * h[1];
+  size_t lag_estimate1 = 0;
+  size_t lag_estimate2 = 1;
+  const size_t last_index = h.size() - 1;
+  // Keeping track of even & odd max elements separately typically allows the
+  // compiler to produce more efficient code.
+  for (size_t k = 2; k < last_index; k += 2) {
+    float element1 = h[k] * h[k];
+    float element2 = h[k + 1] * h[k + 1];
+    if (element1 > max_element1) {
+      max_element1 = element1;
+      lag_estimate1 = k;
+    }
+    if (element2 > max_element2) {
+      max_element2 = element2;
+      lag_estimate2 = k + 1;
+    }
+  }
+  if (max_element2 > max_element1) {
+    max_element1 = max_element2;
+    lag_estimate1 = lag_estimate2;
+  }
+  // In case of odd h size, we have not yet checked the last element.
+  float last_element = h[last_index] * h[last_index];
+  if (last_element > max_element1) {
+    return last_index;
+  }
+  return lag_estimate1;
+}
+
 }  // namespace aec3
 
 MatchedFilter::MatchedFilter(ApmDataDumper* data_dumper,
@@ -390,17 +435,15 @@ void MatchedFilter::Update(const DownsampledRenderBuffer& render_buffer,
     }
 
     // Compute anchor for the matched filter error.
-    const float error_sum_anchor =
-        std::inner_product(y.begin(), y.end(), y.begin(), 0.f);
+    float error_sum_anchor = 0.0f;
+    for (size_t k = 0; k < y.size(); ++k) {
+      error_sum_anchor += y[k] * y[k];
+    }
 
     // Estimate the lag in the matched filter as the distance to the portion in
     // the filter that contributes the most to the matched filter output. This
     // is detected as the peak of the matched filter.
-    const size_t lag_estimate = std::distance(
-        filters_[n].begin(),
-        std::max_element(
-            filters_[n].begin(), filters_[n].end(),
-            [](float a, float b) -> bool { return a * a < b * b; }));
+    const size_t lag_estimate = aec3::MaxSquarePeakIndex(filters_[n]);
 
     // Update the lag estimates for the matched filter.
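For reference, the behavior MaxSquarePeakIndex() must reproduce is exactly the std::max_element search it replaces in MatchedFilter::Update() above; a standalone equivalent of the removed code:

#include <algorithm>
#include <cstddef>
#include <iterator>
#include <vector>

size_t MaxSquarePeakIndexReference(const std::vector<float>& h) {
  // Assumes a non-empty input, as in the matched filter use case: returns the
  // index of the element with the largest squared value.
  return static_cast<size_t>(std::distance(
      h.begin(), std::max_element(h.begin(), h.end(), [](float a, float b) {
        return a * a < b * b;
      })));
}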
   lag_estimates_[n] = LagEstimate(
@@ -442,7 +485,7 @@ void MatchedFilter::Update(const DownsampledRenderBuffer& render_buffer,
         data_dumper_->DumpRaw("aec3_correlator_9_h", filters_[9]);
         break;
       default:
-        RTC_NOTREACHED();
+        RTC_DCHECK_NOTREACHED();
     }
 
     alignment_shift += filter_intra_lag_shift_;
diff --git a/modules/audio_processing/aec3/matched_filter.h b/modules/audio_processing/aec3/matched_filter.h
index c6410ab4ee..dd4a678394 100644
--- a/modules/audio_processing/aec3/matched_filter.h
+++ b/modules/audio_processing/aec3/matched_filter.h
@@ -74,6 +74,9 @@ void MatchedFilterCore(size_t x_start_index,
                        bool* filters_updated,
                        float* error_sum);
 
+// Finds the largest peak of the squared values in the array.
+size_t MaxSquarePeakIndex(rtc::ArrayView<const float> h);
+
 }  // namespace aec3
 
 // Produces recursively updated cross-correlation estimates for several signal
diff --git a/modules/audio_processing/aec3/matched_filter_avx2.cc b/modules/audio_processing/aec3/matched_filter_avx2.cc
index ed32102aa4..8b7010f1dc 100644
--- a/modules/audio_processing/aec3/matched_filter_avx2.cc
+++ b/modules/audio_processing/aec3/matched_filter_avx2.cc
@@ -39,7 +39,9 @@ void MatchedFilterCore_AVX2(size_t x_start_index,
 
   // Initialize values for the accumulation.
   __m256 s_256 = _mm256_set1_ps(0);
+  __m256 s_256_8 = _mm256_set1_ps(0);
   __m256 x2_sum_256 = _mm256_set1_ps(0);
+  __m256 x2_sum_256_8 = _mm256_set1_ps(0);
   float x2_sum = 0.f;
   float s = 0;
@@ -52,18 +54,22 @@ void MatchedFilterCore_AVX2(size_t x_start_index,
     const int chunk2 = h_size - chunk1;
     for (int limit : {chunk1, chunk2}) {
       // Perform 256 bit vector operations.
-      const int limit_by_8 = limit >> 3;
-      for (int k = limit_by_8; k > 0; --k, h_p += 8, x_p += 8) {
+      const int limit_by_16 = limit >> 4;
+      for (int k = limit_by_16; k > 0; --k, h_p += 16, x_p += 16) {
         // Load the data into 256 bit vectors.
         __m256 x_k = _mm256_loadu_ps(x_p);
         __m256 h_k = _mm256_loadu_ps(h_p);
+        __m256 x_k_8 = _mm256_loadu_ps(x_p + 8);
+        __m256 h_k_8 = _mm256_loadu_ps(h_p + 8);
         // Compute and accumulate x * x and h * x.
         x2_sum_256 = _mm256_fmadd_ps(x_k, x_k, x2_sum_256);
+        x2_sum_256_8 = _mm256_fmadd_ps(x_k_8, x_k_8, x2_sum_256_8);
         s_256 = _mm256_fmadd_ps(h_k, x_k, s_256);
+        s_256_8 = _mm256_fmadd_ps(h_k_8, x_k_8, s_256_8);
       }
       // Perform non-vector operations for any remaining items.
-      for (int k = limit - limit_by_8 * 8; k > 0; --k, ++h_p, ++x_p) {
+      for (int k = limit - limit_by_16 * 16; k > 0; --k, ++h_p, ++x_p) {
         const float x_k = *x_p;
         x2_sum += x_k * x_k;
         s += *h_p * x_k;
@@ -73,6 +79,8 @@ void MatchedFilterCore_AVX2(size_t x_start_index,
   }
 
   // Sum components together.
+  x2_sum_256 = _mm256_add_ps(x2_sum_256, x2_sum_256_8);
+  s_256 = _mm256_add_ps(s_256, s_256_8);
   __m128 x2_sum_128 = _mm_add_ps(_mm256_extractf128_ps(x2_sum_256, 0),
                                  _mm256_extractf128_ps(x2_sum_256, 1));
   __m128 s_128 = _mm_add_ps(_mm256_extractf128_ps(s_256, 0),
diff --git a/modules/audio_processing/aec3/matched_filter_unittest.cc b/modules/audio_processing/aec3/matched_filter_unittest.cc
index 37b51fa624..8abfb69a7a 100644
--- a/modules/audio_processing/aec3/matched_filter_unittest.cc
+++ b/modules/audio_processing/aec3/matched_filter_unittest.cc
@@ -176,6 +176,28 @@ TEST(MatchedFilter, TestAvx2Optimizations) {
 
 #endif
 
+// Verifies that the (optimized) function MaxSquarePeakIndex() produces output
+// equal to that of the corresponding std functions.
+TEST(MatchedFilter, MaxSquarePeakIndex) {
+  Random random_generator(42U);
+  constexpr int kMaxLength = 128;
+  constexpr int kNumIterationsPerLength = 256;
+  for (int length = 1; length < kMaxLength; ++length) {
+    std::vector<float> y(length);
+    for (int i = 0; i < kNumIterationsPerLength; ++i) {
+      RandomizeSampleVector(&random_generator, y);
+
+      size_t lag_from_function = MaxSquarePeakIndex(y);
+      size_t lag_from_std = std::distance(
+          y.begin(),
+          std::max_element(y.begin(), y.end(), [](float a, float b) -> bool {
+            return a * a < b * b;
+          }));
+      EXPECT_EQ(lag_from_function, lag_from_std);
+    }
+  }
+}
+
 // Verifies that the matched filter produces proper lag estimates for
 // artificially
 // delayed signals.
diff --git a/modules/audio_processing/aec3/render_buffer.cc b/modules/audio_processing/aec3/render_buffer.cc
index 60ea69cce1..aa511e2b6b 100644
--- a/modules/audio_processing/aec3/render_buffer.cc
+++ b/modules/audio_processing/aec3/render_buffer.cc
@@ -42,8 +42,9 @@ void RenderBuffer::SpectralSum(
   int position = spectrum_buffer_->read;
   for (size_t j = 0; j < num_spectra; ++j) {
     for (const auto& channel_spectrum : spectrum_buffer_->buffer[position]) {
-      std::transform(X2->begin(), X2->end(), channel_spectrum.begin(),
-                     X2->begin(), std::plus<float>());
+      for (size_t k = 0; k < X2->size(); ++k) {
+        (*X2)[k] += channel_spectrum[k];
+      }
     }
     position = spectrum_buffer_->IncIndex(position);
   }
@@ -60,18 +61,18 @@ void RenderBuffer::SpectralSums(
   size_t j = 0;
   for (; j < num_spectra_shorter; ++j) {
     for (const auto& channel_spectrum : spectrum_buffer_->buffer[position]) {
-      std::transform(X2_shorter->begin(), X2_shorter->end(),
-                     channel_spectrum.begin(), X2_shorter->begin(),
-                     std::plus<float>());
+      for (size_t k = 0; k < X2_shorter->size(); ++k) {
+        (*X2_shorter)[k] += channel_spectrum[k];
+      }
     }
     position = spectrum_buffer_->IncIndex(position);
   }
   std::copy(X2_shorter->begin(), X2_shorter->end(), X2_longer->begin());
   for (; j < num_spectra_longer; ++j) {
     for (const auto& channel_spectrum : spectrum_buffer_->buffer[position]) {
-      std::transform(X2_longer->begin(), X2_longer->end(),
-                     channel_spectrum.begin(), X2_longer->begin(),
-                     std::plus<float>());
+      for (size_t k = 0; k < X2_longer->size(); ++k) {
+        (*X2_longer)[k] += channel_spectrum[k];
+      }
     }
     position = spectrum_buffer_->IncIndex(position);
   }
diff --git a/modules/audio_processing/aec3/render_delay_controller_metrics.h b/modules/audio_processing/aec3/render_delay_controller_metrics.h
index 8c527a142e..309122d80d 100644
--- a/modules/audio_processing/aec3/render_delay_controller_metrics.h
+++ b/modules/audio_processing/aec3/render_delay_controller_metrics.h
@@ -15,7 +15,6 @@
 
 #include "absl/types/optional.h"
 #include "modules/audio_processing/aec3/clockdrift_detector.h"
-#include "rtc_base/constructor_magic.h"
 
 namespace webrtc {
 
@@ -24,6 +23,10 @@ class RenderDelayControllerMetrics {
  public:
   RenderDelayControllerMetrics();
 
+  RenderDelayControllerMetrics(const RenderDelayControllerMetrics&) = delete;
+  RenderDelayControllerMetrics& operator=(const RenderDelayControllerMetrics&) =
+      delete;
+
   // Updates the metric with new data.
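The RenderBuffer change in the hunk above swaps std::transform with std::plus for plain indexed loops. The operation itself is just an element-wise accumulation over the AEC3 spectrum bins; a standalone sketch, assuming the 65-bin layout of AEC3's 128-point FFT:

#include <array>
#include <cstddef>

constexpr size_t kFftLengthBy2Plus1 = 65;  // Matches AEC3's 128-point FFT.

// Adds one channel spectrum into the running sum, bin by bin.
void AccumulateSpectrum(const std::array<float, kFftLengthBy2Plus1>& source,
                        std::array<float, kFftLengthBy2Plus1>* sum) {
  for (size_t k = 0; k < sum->size(); ++k) {
    (*sum)[k] += source[k];
  }
}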
void Update(absl::optional delay_samples, size_t buffer_delay_blocks, @@ -46,8 +49,6 @@ class RenderDelayControllerMetrics { bool metrics_reported_ = false; bool initial_update = true; int skew_shift_count_ = 0; - - RTC_DISALLOW_COPY_AND_ASSIGN(RenderDelayControllerMetrics); }; } // namespace webrtc diff --git a/modules/audio_processing/aec3/render_signal_analyzer.h b/modules/audio_processing/aec3/render_signal_analyzer.h index c7a3d8b7a0..2e4aaa4ba7 100644 --- a/modules/audio_processing/aec3/render_signal_analyzer.h +++ b/modules/audio_processing/aec3/render_signal_analyzer.h @@ -20,7 +20,6 @@ #include "modules/audio_processing/aec3/aec3_common.h" #include "modules/audio_processing/aec3/render_buffer.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -30,6 +29,9 @@ class RenderSignalAnalyzer { explicit RenderSignalAnalyzer(const EchoCanceller3Config& config); ~RenderSignalAnalyzer(); + RenderSignalAnalyzer(const RenderSignalAnalyzer&) = delete; + RenderSignalAnalyzer& operator=(const RenderSignalAnalyzer&) = delete; + // Updates the render signal analysis with the most recent render signal. void Update(const RenderBuffer& render_buffer, const absl::optional& delay_partitions); @@ -53,8 +55,6 @@ class RenderSignalAnalyzer { std::array narrow_band_counters_; absl::optional narrow_peak_band_; size_t narrow_peak_counter_; - - RTC_DISALLOW_COPY_AND_ASSIGN(RenderSignalAnalyzer); }; } // namespace webrtc diff --git a/modules/audio_processing/aec3/residual_echo_estimator.cc b/modules/audio_processing/aec3/residual_echo_estimator.cc index 15bebecb5f..640a3e3cb9 100644 --- a/modules/audio_processing/aec3/residual_echo_estimator.cc +++ b/modules/audio_processing/aec3/residual_echo_estimator.cc @@ -203,7 +203,8 @@ void ResidualEchoEstimator::Estimate( LinearEstimate(S2_linear, aec_state.ErleUnbounded(), R2_unbounded); } - UpdateReverb(ReverbType::kLinear, aec_state, render_buffer); + UpdateReverb(ReverbType::kLinear, aec_state, render_buffer, + dominant_nearend); AddReverb(R2); AddReverb(R2_unbounded); } else { @@ -240,7 +241,8 @@ void ResidualEchoEstimator::Estimate( if (config_.echo_model.model_reverb_in_nonlinear_mode && !aec_state.TransparentModeActive()) { - UpdateReverb(ReverbType::kNonLinear, aec_state, render_buffer); + UpdateReverb(ReverbType::kNonLinear, aec_state, render_buffer, + dominant_nearend); AddReverb(R2); AddReverb(R2_unbounded); } @@ -305,7 +307,8 @@ void ResidualEchoEstimator::UpdateRenderNoisePower( // Updates the reverb estimation. void ResidualEchoEstimator::UpdateReverb(ReverbType reverb_type, const AecState& aec_state, - const RenderBuffer& render_buffer) { + const RenderBuffer& render_buffer, + bool dominant_nearend) { // Choose reverb partition based on what type of echo power model is used. const size_t first_reverb_partition = reverb_type == ReverbType::kLinear @@ -330,15 +333,15 @@ void ResidualEchoEstimator::UpdateReverb(ReverbType reverb_type, } // Update the reverb estimate. 
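The hunk below makes the reverb update aware of nearend dominance: a milder decay is selected while the nearend dominates. A simplified sketch of the role the selected `decay` plays in an exponential reverb power model (a hypothetical helper for intuition, not the aec3::ReverbModel implementation):

// Each block, the modeled reverb power is the previous power scaled by
// `decay` (0 < decay < 1), plus the contribution of the newest render block.
// A smaller ("milder") decay makes the modeled tail die out faster, which
// reduces the residual echo estimate during nearend activity.
float UpdateReverbPower(float previous_reverb_power,
                        float new_render_power,
                        float echo_path_gain,
                        float decay) {
  return previous_reverb_power * decay + new_render_power * echo_path_gain;
}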
+  float reverb_decay = aec_state.ReverbDecay(/*mild=*/dominant_nearend);
   if (reverb_type == ReverbType::kLinear) {
-    echo_reverb_.UpdateReverb(render_power,
-                              aec_state.GetReverbFrequencyResponse(),
-                              aec_state.ReverbDecay());
+    echo_reverb_.UpdateReverb(
+        render_power, aec_state.GetReverbFrequencyResponse(), reverb_decay);
   } else {
     const float echo_path_gain =
         GetEchoPathGain(aec_state, /*gain_for_early_reflections=*/false);
     echo_reverb_.UpdateReverbNoFreqShaping(render_power, echo_path_gain,
-                                           aec_state.ReverbDecay());
+                                           reverb_decay);
   }
 }
 
 // Adds the estimated power of the reverb to the residual echo power.
diff --git a/modules/audio_processing/aec3/residual_echo_estimator.h b/modules/audio_processing/aec3/residual_echo_estimator.h
index c071854c4a..c468764002 100644
--- a/modules/audio_processing/aec3/residual_echo_estimator.h
+++ b/modules/audio_processing/aec3/residual_echo_estimator.h
@@ -56,7 +56,8 @@ class ResidualEchoEstimator {
   // Updates the reverb estimation.
   void UpdateReverb(ReverbType reverb_type,
                     const AecState& aec_state,
-                    const RenderBuffer& render_buffer);
+                    const RenderBuffer& render_buffer,
+                    bool dominant_nearend);
 
   // Adds the estimated unmodelled echo power to the residual echo power
   // estimate.
diff --git a/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc b/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc
index 3d760b7dda..05a610335b 100644
--- a/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc
+++ b/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc
@@ -10,6 +10,8 @@
 
 #include "modules/audio_processing/aec3/residual_echo_estimator.h"
 
+#include <numeric>
+
 #include "api/audio/echo_canceller3_config.h"
 #include "modules/audio_processing/aec3/aec3_fft.h"
 #include "modules/audio_processing/aec3/aec_state.h"
@@ -21,6 +23,109 @@
 
 namespace webrtc {
 
+namespace {
+constexpr int kSampleRateHz = 48000;
+constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
+constexpr float kEpsilon = 1e-4f;
+}  // namespace
+
+class ResidualEchoEstimatorTest {
+ public:
+  ResidualEchoEstimatorTest(size_t num_render_channels,
+                            size_t num_capture_channels,
+                            const EchoCanceller3Config& config)
+      : num_render_channels_(num_render_channels),
+        num_capture_channels_(num_capture_channels),
+        config_(config),
+        estimator_(config_, num_render_channels_),
+        aec_state_(config_, num_capture_channels_),
+        render_delay_buffer_(RenderDelayBuffer::Create(config_,
+                                                       kSampleRateHz,
+                                                       num_render_channels_)),
+        E2_refined_(num_capture_channels_),
+        S2_linear_(num_capture_channels_),
+        Y2_(num_capture_channels_),
+        R2_(num_capture_channels_),
+        R2_unbounded_(num_capture_channels_),
+        x_(kNumBands,
+           std::vector<std::vector<float>>(
+               num_render_channels_,
+               std::vector<float>(kBlockSize, 0.0f))),
+        H2_(num_capture_channels_,
+            std::vector<std::array<float, kFftLengthBy2Plus1>>(10)),
+        h_(num_capture_channels_,
+           std::vector<float>(
+               GetTimeDomainLength(config_.filter.refined.length_blocks),
+               0.0f)),
+        random_generator_(42U),
+        output_(num_capture_channels_) {
+    for (auto& H2_ch : H2_) {
+      for (auto& H2_k : H2_ch) {
+        H2_k.fill(0.01f);
+      }
+      H2_ch[2].fill(10.f);
+      H2_ch[2][0] = 0.1f;
+    }
+
+    for (auto& subtractor_output : output_) {
+      subtractor_output.Reset();
+      subtractor_output.s_refined.fill(100.f);
+    }
+    y_.fill(0.f);
+
+    constexpr float kLevel = 10.f;
+    for (auto& E2_refined_ch : E2_refined_) {
+      E2_refined_ch.fill(kLevel);
+    }
+    S2_linear_[0].fill(kLevel);
+    for (auto& Y2_ch : Y2_) {
+      Y2_ch.fill(kLevel);
+    }
+  }
+
+  void RunOneFrame(bool dominant_nearend) {
+    RandomizeSampleVector(&random_generator_, x_[0][0]);
+    render_delay_buffer_->Insert(x_);
+    if (first_frame_) {
+      render_delay_buffer_->Reset();
+      first_frame_ = false;
+    }
+    render_delay_buffer_->PrepareCaptureProcessing();
+
+    aec_state_.Update(delay_estimate_, H2_, h_,
+                      *render_delay_buffer_->GetRenderBuffer(), E2_refined_,
+                      Y2_, output_);
+
+    estimator_.Estimate(aec_state_, *render_delay_buffer_->GetRenderBuffer(),
+                        S2_linear_, Y2_, dominant_nearend, R2_, R2_unbounded_);
+  }
+
+  rtc::ArrayView<const std::array<float, kFftLengthBy2Plus1>> R2() const {
+    return R2_;
+  }
+
+ private:
+  const size_t num_render_channels_;
+  const size_t num_capture_channels_;
+  const EchoCanceller3Config& config_;
+  ResidualEchoEstimator estimator_;
+  AecState aec_state_;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> E2_refined_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> S2_linear_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> Y2_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> R2_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> R2_unbounded_;
+  std::vector<std::vector<std::vector<float>>> x_;
+  std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>> H2_;
+  std::vector<std::vector<float>> h_;
+  Random random_generator_;
+  std::vector<SubtractorOutput> output_;
+  std::array<float, kBlockSize> y_;
+  absl::optional<DelayEstimate> delay_estimate_;
+  bool first_frame_ = true;
+};
+
 class ResidualEchoEstimatorMultiChannel
     : public ::testing::Test,
       public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {};
@@ -33,77 +138,63 @@ INSTANTIATE_TEST_SUITE_P(MultiChannel,
 TEST_P(ResidualEchoEstimatorMultiChannel, BasicTest) {
   const size_t num_render_channels = std::get<0>(GetParam());
   const size_t num_capture_channels = std::get<1>(GetParam());
-  constexpr int kSampleRateHz = 48000;
-  constexpr size_t kNumBands = NumBandsForRate(kSampleRateHz);
 
   EchoCanceller3Config config;
-  ResidualEchoEstimator estimator(config, num_render_channels);
-  AecState aec_state(config, num_capture_channels);
-  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
-      RenderDelayBuffer::Create(config, kSampleRateHz, num_render_channels));
-
-  std::vector<std::array<float, kFftLengthBy2Plus1>> E2_refined(
-      num_capture_channels);
-  std::vector<std::array<float, kFftLengthBy2Plus1>> S2_linear(
-      num_capture_channels);
-  std::vector<std::array<float, kFftLengthBy2Plus1>> Y2(num_capture_channels);
-  std::vector<std::array<float, kFftLengthBy2Plus1>> R2(num_capture_channels);
-  std::vector<std::array<float, kFftLengthBy2Plus1>> R2_unbounded(
-      num_capture_channels);
-  std::vector<std::vector<std::vector<float>>> x(
-      kNumBands, std::vector<std::vector<float>>(
-                     num_render_channels, std::vector<float>(kBlockSize, 0.f)));
-  std::vector<std::vector<std::array<float, kFftLengthBy2Plus1>>> H2(
-      num_capture_channels,
-      std::vector<std::array<float, kFftLengthBy2Plus1>>(10));
-  Random random_generator(42U);
-  std::vector<SubtractorOutput> output(num_capture_channels);
-  std::array<float, kBlockSize> y;
-  absl::optional<DelayEstimate> delay_estimate;
-
-  for (auto& H2_ch : H2) {
-    for (auto& H2_k : H2_ch) {
-      H2_k.fill(0.01f);
-    }
-    H2_ch[2].fill(10.f);
-    H2_ch[2][0] = 0.1f;
-  }
-
-  std::vector<std::vector<float>> h(
-      num_capture_channels,
-      std::vector<float>(
-          GetTimeDomainLength(config.filter.refined.length_blocks), 0.f));
-
-  for (auto& subtractor_output : output) {
-    subtractor_output.Reset();
-    subtractor_output.s_refined.fill(100.f);
-  }
-  y.fill(0.f);
-
-  constexpr float kLevel = 10.f;
-  for (auto& E2_refined_ch : E2_refined) {
-    E2_refined_ch.fill(kLevel);
-  }
-  S2_linear[0].fill(kLevel);
-  for (auto& Y2_ch : Y2) {
-    Y2_ch.fill(kLevel);
+  ResidualEchoEstimatorTest residual_echo_estimator_test(
+      num_render_channels, num_capture_channels, config);
+  for (int k = 0; k < 1993; ++k) {
+    residual_echo_estimator_test.RunOneFrame(/*dominant_nearend=*/false);
   }
+}
 
-  for (int k = 0; k < 1993; ++k) {
-    RandomizeSampleVector(&random_generator, x[0][0]);
-    render_delay_buffer->Insert(x);
-    if (k == 0) {
-      render_delay_buffer->Reset();
+TEST(ResidualEchoEstimatorMultiChannel, ReverbTest) {
+  const size_t num_render_channels = 1;
+  const size_t num_capture_channels = 1;
+  const size_t nFrames = 100;
+
+  EchoCanceller3Config reference_config;
+  reference_config.ep_strength.default_len = 0.95f;
+  reference_config.ep_strength.nearend_len = 0.95f;
+  EchoCanceller3Config config_use_nearend_len = reference_config;
+  config_use_nearend_len.ep_strength.default_len = 0.95f;
+  config_use_nearend_len.ep_strength.nearend_len = 0.83f;
+
+  ResidualEchoEstimatorTest reference_residual_echo_estimator_test(
+      num_render_channels, num_capture_channels, reference_config);
+  ResidualEchoEstimatorTest use_nearend_len_residual_echo_estimator_test(
+      num_render_channels, num_capture_channels, config_use_nearend_len);
+
+  std::vector<float> acum_energy_reference_R2(num_capture_channels, 0.0f);
+  std::vector<float> acum_energy_R2(num_capture_channels, 0.0f);
+  for (size_t frame = 0; frame < nFrames; ++frame) {
+    bool dominant_nearend = frame > nFrames / 2;
+    reference_residual_echo_estimator_test.RunOneFrame(dominant_nearend);
+    use_nearend_len_residual_echo_estimator_test.RunOneFrame(dominant_nearend);
+    const auto& reference_R2 = reference_residual_echo_estimator_test.R2();
+    const auto& R2 = use_nearend_len_residual_echo_estimator_test.R2();
+    ASSERT_EQ(reference_R2.size(), R2.size());
+    for (size_t ch = 0; ch < reference_R2.size(); ++ch) {
+      float energy_reference_R2 = std::accumulate(
+          reference_R2[ch].cbegin(), reference_R2[ch].cend(), 0.0f);
+      float energy_R2 = std::accumulate(R2[ch].cbegin(), R2[ch].cend(), 0.0f);
+      if (dominant_nearend) {
+        EXPECT_GE(energy_reference_R2, energy_R2);
+      } else {
+        EXPECT_NEAR(energy_reference_R2, energy_R2, kEpsilon);
+      }
+      acum_energy_reference_R2[ch] += energy_reference_R2;
+      acum_energy_R2[ch] += energy_R2;
+    }
+    if (frame == nFrames / 2 || frame == nFrames - 1) {
+      for (size_t ch = 0; ch < acum_energy_reference_R2.size(); ch++) {
+        if (dominant_nearend) {
+          EXPECT_GT(acum_energy_reference_R2[ch], acum_energy_R2[ch]);
+        } else {
+          EXPECT_NEAR(acum_energy_reference_R2[ch], acum_energy_R2[ch],
+                      kEpsilon);
+        }
+      }
+    }
-    render_delay_buffer->PrepareCaptureProcessing();
-
-    aec_state.Update(delay_estimate, H2, h,
-                     *render_delay_buffer->GetRenderBuffer(), E2_refined, Y2,
-                     output);
-
-    estimator.Estimate(aec_state, *render_delay_buffer->GetRenderBuffer(),
-                       S2_linear, Y2, /*dominant_nearend=*/false, R2,
-                       R2_unbounded);
   }
 }
diff --git a/modules/audio_processing/aec3/reverb_decay_estimator.cc b/modules/audio_processing/aec3/reverb_decay_estimator.cc
index f160b835f0..2daf376911 100644
--- a/modules/audio_processing/aec3/reverb_decay_estimator.cc
+++ b/modules/audio_processing/aec3/reverb_decay_estimator.cc
@@ -93,7 +93,8 @@ ReverbDecayEstimator::ReverbDecayEstimator(const EchoCanceller3Config& config)
       late_reverb_start_(kEarlyReverbMinSizeBlocks),
       late_reverb_end_(kEarlyReverbMinSizeBlocks),
       previous_gains_(config.filter.refined.length_blocks, 0.f),
-      decay_(std::fabs(config.ep_strength.default_len)) {
+      decay_(std::fabs(config.ep_strength.default_len)),
+      mild_decay_(std::fabs(config.ep_strength.nearend_len)) {
   RTC_DCHECK_GT(config.filter.refined.length_blocks,
                 static_cast<size_t>(kEarlyReverbMinSizeBlocks));
 }
@@ -295,7 +296,7 @@ void ReverbDecayEstimator::LateReverbLinearRegressor::Accumulate(float z) {
 float ReverbDecayEstimator::LateReverbLinearRegressor::Estimate() {
   RTC_DCHECK(EstimateAvailable());
   if (nn_ == 0.f) {
-    RTC_NOTREACHED();
+    RTC_DCHECK_NOTREACHED();
     return 0.f;
   }
   return nz_ / nn_;
diff --git a/modules/audio_processing/aec3/reverb_decay_estimator.h b/modules/audio_processing/aec3/reverb_decay_estimator.h
index 3bb9b2bb20..fee54210e6 100644
--- a/modules/audio_processing/aec3/reverb_decay_estimator.h
+++ 
b/modules/audio_processing/aec3/reverb_decay_estimator.h @@ -34,8 +34,15 @@ class ReverbDecayEstimator { int filter_delay_blocks, bool usable_linear_filter, bool stationary_signal); - // Returns the decay for the exponential model. - float Decay() const { return decay_; } + // Returns the decay for the exponential model. The parameter `mild` indicates + // which exponential decay to return, the default one or a milder one. + float Decay(bool mild) const { + if (use_adaptive_echo_decay_) { + return decay_; + } else { + return mild ? mild_decay_ : decay_; + } + } // Dumps debug data. void Dump(ApmDataDumper* data_dumper) const; @@ -103,6 +110,7 @@ class ReverbDecayEstimator { bool estimation_region_identified_ = false; std::vector previous_gains_; float decay_; + float mild_decay_; float tail_gain_ = 0.f; float smoothing_constant_ = 0.f; }; diff --git a/modules/audio_processing/aec3/reverb_frequency_response.cc b/modules/audio_processing/aec3/reverb_frequency_response.cc index f4bd91fd48..6e7282a1fc 100644 --- a/modules/audio_processing/aec3/reverb_frequency_response.cc +++ b/modules/audio_processing/aec3/reverb_frequency_response.cc @@ -49,9 +49,13 @@ float AverageDecayWithinFilter( } // namespace -ReverbFrequencyResponse::ReverbFrequencyResponse() { - tail_response_.fill(0.f); +ReverbFrequencyResponse::ReverbFrequencyResponse( + bool use_conservative_tail_frequency_response) + : use_conservative_tail_frequency_response_( + use_conservative_tail_frequency_response) { + tail_response_.fill(0.0f); } + ReverbFrequencyResponse::~ReverbFrequencyResponse() = default; void ReverbFrequencyResponse::Update( @@ -88,6 +92,12 @@ void ReverbFrequencyResponse::Update( tail_response_[k] = freq_resp_direct_path[k] * average_decay_; } + if (use_conservative_tail_frequency_response_) { + for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) { + tail_response_[k] = std::max(freq_resp_tail[k], tail_response_[k]); + } + } + for (size_t k = 1; k < kFftLengthBy2; ++k) { const float avg_neighbour = 0.5f * (tail_response_[k - 1] + tail_response_[k + 1]); diff --git a/modules/audio_processing/aec3/reverb_frequency_response.h b/modules/audio_processing/aec3/reverb_frequency_response.h index b16418628b..69b16b54d0 100644 --- a/modules/audio_processing/aec3/reverb_frequency_response.h +++ b/modules/audio_processing/aec3/reverb_frequency_response.h @@ -23,7 +23,8 @@ namespace webrtc { // Class for updating the frequency response for the reverb. class ReverbFrequencyResponse { public: - ReverbFrequencyResponse(); + explicit ReverbFrequencyResponse( + bool use_conservative_tail_frequency_response); ~ReverbFrequencyResponse(); // Updates the frequency response estimate of the reverb. 
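With use_conservative_tail_frequency_response enabled in the hunk above, the modeled tail response is floored by the measured tail of the linear filter, so the reverb model errs toward overestimating the echo. A standalone sketch of that element-wise flooring (illustration only):

#include <algorithm>
#include <array>
#include <cstddef>

// Keeps the modeled tail at least as large as the measured tail, bin by bin.
template <size_t N>
void MakeTailConservative(const std::array<float, N>& measured_tail,
                          std::array<float, N>* modeled_tail) {
  for (size_t k = 0; k < N; ++k) {
    (*modeled_tail)[k] = std::max(measured_tail[k], (*modeled_tail)[k]);
  }
}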
@@ -44,6 +45,7 @@ class ReverbFrequencyResponse { int filter_delay_blocks, float linear_filter_quality); + const bool use_conservative_tail_frequency_response_; float average_decay_ = 0.f; std::array tail_response_; }; diff --git a/modules/audio_processing/aec3/reverb_model_estimator.cc b/modules/audio_processing/aec3/reverb_model_estimator.cc index 717431103f..5cd7a7870d 100644 --- a/modules/audio_processing/aec3/reverb_model_estimator.cc +++ b/modules/audio_processing/aec3/reverb_model_estimator.cc @@ -15,7 +15,10 @@ namespace webrtc { ReverbModelEstimator::ReverbModelEstimator(const EchoCanceller3Config& config, size_t num_capture_channels) : reverb_decay_estimators_(num_capture_channels), - reverb_frequency_responses_(num_capture_channels) { + reverb_frequency_responses_( + num_capture_channels, + ReverbFrequencyResponse( + config.ep_strength.use_conservative_tail_frequency_response)) { for (size_t ch = 0; ch < reverb_decay_estimators_.size(); ++ch) { reverb_decay_estimators_[ch] = std::make_unique(config); diff --git a/modules/audio_processing/aec3/reverb_model_estimator.h b/modules/audio_processing/aec3/reverb_model_estimator.h index 3b9971abae..63bade977f 100644 --- a/modules/audio_processing/aec3/reverb_model_estimator.h +++ b/modules/audio_processing/aec3/reverb_model_estimator.h @@ -12,6 +12,7 @@ #define MODULES_AUDIO_PROCESSING_AEC3_REVERB_MODEL_ESTIMATOR_H_ #include +#include #include #include "absl/types/optional.h" @@ -42,9 +43,13 @@ class ReverbModelEstimator { const std::vector& usable_linear_estimates, bool stationary_block); - // Returns the exponential decay of the reverberant echo. + // Returns the exponential decay of the reverberant echo. The parameter `mild` + // indicates which exponential decay to return, the default one or a milder + // one. // TODO(peah): Correct to properly support multiple channels. - float ReverbDecay() const { return reverb_decay_estimators_[0]->Decay(); } + float ReverbDecay(bool mild) const { + return reverb_decay_estimators_[0]->Decay(mild); + } // Return the frequency response of the reverberant echo. // TODO(peah): Correct to properly support multiple channels. diff --git a/modules/audio_processing/aec3/reverb_model_estimator_unittest.cc b/modules/audio_processing/aec3/reverb_model_estimator_unittest.cc index f360a6fcbb..fb7dcef37f 100644 --- a/modules/audio_processing/aec3/reverb_model_estimator_unittest.cc +++ b/modules/audio_processing/aec3/reverb_model_estimator_unittest.cc @@ -56,7 +56,9 @@ class ReverbModelEstimatorTest { CreateImpulseResponseWithDecay(); } void RunEstimator(); - float GetDecay() { return estimated_decay_; } + float GetDecay(bool mild) { + return mild ? 
mild_estimated_decay_ : estimated_decay_; + } float GetTrueDecay() { return kTruePowerDecay; } float GetPowerTailDb() { return 10.f * std::log10(estimated_power_tail_); } float GetTruePowerTailDb() { return 10.f * std::log10(true_power_tail_); } @@ -67,6 +69,7 @@ class ReverbModelEstimatorTest { static constexpr float kTruePowerDecay = 0.5f; const EchoCanceller3Config aec3_config_; float estimated_decay_; + float mild_estimated_decay_; float estimated_power_tail_ = 0.f; float true_power_tail_ = 0.f; std::vector> h_; @@ -121,7 +124,8 @@ void ReverbModelEstimatorTest::RunEstimator() { estimator.Update(h_, H2_, quality_linear_, filter_delay_blocks, usable_linear_estimates, kStationaryBlock); } - estimated_decay_ = estimator.ReverbDecay(); + estimated_decay_ = estimator.ReverbDecay(/*mild=*/false); + mild_estimated_decay_ = estimator.ReverbDecay(/*mild=*/true); auto freq_resp_tail = estimator.GetReverbFrequencyResponse(); estimated_power_tail_ = std::accumulate(freq_resp_tail.begin(), freq_resp_tail.end(), 0.f); @@ -132,7 +136,9 @@ TEST(ReverbModelEstimatorTests, NotChangingDecay) { for (size_t num_capture_channels : {1, 2, 4, 8}) { ReverbModelEstimatorTest test(kDefaultDecay, num_capture_channels); test.RunEstimator(); - EXPECT_EQ(test.GetDecay(), kDefaultDecay); + EXPECT_EQ(test.GetDecay(/*mild=*/false), kDefaultDecay); + EXPECT_EQ(test.GetDecay(/*mild=*/true), + EchoCanceller3Config().ep_strength.nearend_len); EXPECT_NEAR(test.GetPowerTailDb(), test.GetTruePowerTailDb(), 5.f); } } @@ -142,7 +148,8 @@ TEST(ReverbModelEstimatorTests, ChangingDecay) { for (size_t num_capture_channels : {1, 2, 4, 8}) { ReverbModelEstimatorTest test(kDefaultDecay, num_capture_channels); test.RunEstimator(); - EXPECT_NEAR(test.GetDecay(), test.GetTrueDecay(), 0.1); + EXPECT_NEAR(test.GetDecay(/*mild=*/false), test.GetTrueDecay(), 0.1f); + EXPECT_NEAR(test.GetDecay(/*mild=*/true), test.GetTrueDecay(), 0.1f); EXPECT_NEAR(test.GetPowerTailDb(), test.GetTruePowerTailDb(), 5.f); } } diff --git a/modules/audio_processing/aec3/suppression_filter.cc b/modules/audio_processing/aec3/suppression_filter.cc index 8a813d9bd9..1312fa892b 100644 --- a/modules/audio_processing/aec3/suppression_filter.cc +++ b/modules/audio_processing/aec3/suppression_filter.cc @@ -108,12 +108,12 @@ void SuppressionFilter::ApplyGain( for (size_t i = 0; i < kFftLengthBy2Plus1; ++i) { // Apply suppression gains. - E.re[i] *= suppression_gain[i]; - E.im[i] *= suppression_gain[i]; + float E_real = E.re[i] * suppression_gain[i]; + float E_imag = E.im[i] * suppression_gain[i]; // Scale and add the comfort noise. - E.re[i] += noise_gain[i] * comfort_noise[ch].re[i]; - E.im[i] += noise_gain[i] * comfort_noise[ch].im[i]; + E.re[i] = E_real + noise_gain[i] * comfort_noise[ch].re[i]; + E.im[i] = E_imag + noise_gain[i] * comfort_noise[ch].im[i]; } // Synthesis filterbank. @@ -121,24 +121,25 @@ void SuppressionFilter::ApplyGain( constexpr float kIfftNormalization = 2.f / kFftLength; fft_.Ifft(E, &e_extended); - auto& e0 = (*e)[0][ch]; - auto& e0_old = e_output_old_[0][ch]; + float* e0 = (*e)[0][ch].data(); + float* e0_old = e_output_old_[0][ch].data(); // Window and add the first half of e_extended with the second half of // e_extended from the previous block. 
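The rewritten ApplyGain() loop below performs sqrt-Hanning overlap-add: each output sample combines the windowed tail of the previous IFFT block with the windowed head of the current one, and because sqrtHanning[i]^2 + sqrtHanning[i + N/2]^2 = 1, applying the window at both analysis and synthesis reconstructs the signal exactly. A standalone sketch of the synthesis step, assuming AEC3's 128-point FFT:

#include <array>
#include <cstddef>

constexpr size_t kFftLengthBy2 = 64;

// Overlap-add of the previous block's windowed tail and the current block's
// windowed head, followed by the IFFT normalization used in the diff above.
void OverlapAdd(const std::array<float, kFftLengthBy2>& previous_tail,
                const std::array<float, kFftLengthBy2>& current_head,
                const std::array<float, 2 * kFftLengthBy2>& sqrt_hanning,
                float ifft_normalization,
                std::array<float, kFftLengthBy2>* out) {
  for (size_t i = 0; i < kFftLengthBy2; ++i) {
    float sample = previous_tail[i] * sqrt_hanning[kFftLengthBy2 + i] +
                   current_head[i] * sqrt_hanning[i];
    (*out)[i] = sample * ifft_normalization;
  }
}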
for (size_t i = 0; i < kFftLengthBy2; ++i) { - e0[i] = e0_old[i] * kSqrtHanning[kFftLengthBy2 + i]; - e0[i] += e_extended[i] * kSqrtHanning[i]; - e0[i] *= kIfftNormalization; + float e0_i = e0_old[i] * kSqrtHanning[kFftLengthBy2 + i]; + e0_i += e_extended[i] * kSqrtHanning[i]; + e0[i] = e0_i * kIfftNormalization; } // The second half of e_extended is stored for the succeeding frame. std::copy(e_extended.begin() + kFftLengthBy2, - e_extended.begin() + kFftLength, std::begin(e0_old)); + e_extended.begin() + kFftLength, + std::begin(e_output_old_[0][ch])); // Apply suppression gain to upper bands. for (size_t b = 1; b < e->size(); ++b) { - auto& e_band = (*e)[b][ch]; + float* e_band = (*e)[b][ch].data(); for (size_t i = 0; i < kFftLengthBy2; ++i) { e_band[i] *= high_bands_gain; } @@ -150,7 +151,7 @@ void SuppressionFilter::ApplyGain( std::array time_domain_high_band_noise; fft_.Ifft(E, &time_domain_high_band_noise); - auto& e1 = (*e)[1][ch]; + float* e1 = (*e)[1][ch].data(); const float gain = high_bands_noise_scaling * kIfftNormalization; for (size_t i = 0; i < kFftLengthBy2; ++i) { e1[i] += time_domain_high_band_noise[i] * gain; @@ -159,8 +160,8 @@ void SuppressionFilter::ApplyGain( // Delay upper bands to match the delay of the filter bank. for (size_t b = 1; b < e->size(); ++b) { - auto& e_band = (*e)[b][ch]; - auto& e_band_old = e_output_old_[b][ch]; + float* e_band = (*e)[b][ch].data(); + float* e_band_old = e_output_old_[b][ch].data(); for (size_t i = 0; i < kFftLengthBy2; ++i) { std::swap(e_band[i], e_band_old[i]); } @@ -168,7 +169,7 @@ void SuppressionFilter::ApplyGain( // Clamp output of all bands. for (size_t b = 0; b < e->size(); ++b) { - auto& e_band = (*e)[b][ch]; + float* e_band = (*e)[b][ch].data(); for (size_t i = 0; i < kFftLengthBy2; ++i) { e_band[i] = rtc::SafeClamp(e_band[i], -32768.f, 32767.f); } diff --git a/modules/audio_processing/aec3/suppression_filter.h b/modules/audio_processing/aec3/suppression_filter.h index dcf2292c7f..375bfda5a7 100644 --- a/modules/audio_processing/aec3/suppression_filter.h +++ b/modules/audio_processing/aec3/suppression_filter.h @@ -17,7 +17,6 @@ #include "modules/audio_processing/aec3/aec3_common.h" #include "modules/audio_processing/aec3/aec3_fft.h" #include "modules/audio_processing/aec3/fft_data.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -27,6 +26,10 @@ class SuppressionFilter { int sample_rate_hz, size_t num_capture_channels_); ~SuppressionFilter(); + + SuppressionFilter(const SuppressionFilter&) = delete; + SuppressionFilter& operator=(const SuppressionFilter&) = delete; + void ApplyGain(rtc::ArrayView comfort_noise, rtc::ArrayView comfort_noise_high_bands, const std::array& suppression_gain, @@ -40,7 +43,6 @@ class SuppressionFilter { const size_t num_capture_channels_; const Aec3Fft fft_; std::vector>> e_output_old_; - RTC_DISALLOW_COPY_AND_ASSIGN(SuppressionFilter); }; } // namespace webrtc diff --git a/modules/audio_processing/aec3/suppression_gain.cc b/modules/audio_processing/aec3/suppression_gain.cc index 6405d71c2d..14366f1aec 100644 --- a/modules/audio_processing/aec3/suppression_gain.cc +++ b/modules/audio_processing/aec3/suppression_gain.cc @@ -28,10 +28,6 @@ namespace webrtc { namespace { -bool UseUnboundedEchoSpectrum() { - return field_trial::IsEnabled("WebRTC-Aec3UseUnboundedEchoSpectrum"); -} - void LimitLowFrequencyGains(std::array* gain) { // Limit the low frequency gains to avoid the impact of the high-pass filter // on the lower-frequency gain influencing the overall achieved gain. 
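One plausible realization of the LimitLowFrequencyGains() helper whose comment closes the hunk above: pin the lowest bins to a neighboring bin's gain so the high-pass filtered region cannot drive the overall achieved gain. A sketch under that assumption, not necessarily the production body:

#include <algorithm>
#include <array>
#include <cstddef>

constexpr size_t kFftLengthBy2Plus1 = 65;

// Hypothetical: clamp bins 0 and 1 to the smaller of bins 1 and 2 so the
// high-pass filter's attenuation does not dominate the gain computation.
void LimitLowFrequencyGainsSketch(
    std::array<float, kFftLengthBy2Plus1>* gain) {
  (*gain)[0] = (*gain)[1] = std::min((*gain)[1], (*gain)[2]);
}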
@@ -348,7 +344,8 @@ SuppressionGain::SuppressionGain(const EchoCanceller3Config& config, normal_params_(config_.suppressor.last_lf_band, config_.suppressor.first_hf_band, config_.suppressor.normal_tuning), - use_unbounded_echo_spectrum_(UseUnboundedEchoSpectrum()) { + use_unbounded_echo_spectrum_(config.suppressor.dominant_nearend_detection + .use_unbounded_echo_spectrum) { RTC_DCHECK_LT(0, state_change_duration_blocks_); last_gain_.fill(1.f); if (config_.suppressor.use_subband_nearend_detection) { @@ -382,7 +379,7 @@ void SuppressionGain::GetGain( RTC_DCHECK(high_bands_gain); RTC_DCHECK(low_band_gain); - // Choose residual echo spectrum for the dominant nearend detector. + // Choose residual echo spectrum for dominant nearend detection. const auto echo = use_unbounded_echo_spectrum_ ? residual_echo_spectrum_unbounded : residual_echo_spectrum; diff --git a/modules/audio_processing/aec3/suppression_gain.h b/modules/audio_processing/aec3/suppression_gain.h index 7c4a1c9f7d..c8e13f7cf4 100644 --- a/modules/audio_processing/aec3/suppression_gain.h +++ b/modules/audio_processing/aec3/suppression_gain.h @@ -25,7 +25,6 @@ #include "modules/audio_processing/aec3/nearend_detector.h" #include "modules/audio_processing/aec3/render_signal_analyzer.h" #include "modules/audio_processing/logging/apm_data_dumper.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -36,6 +35,10 @@ class SuppressionGain { int sample_rate_hz, size_t num_capture_channels); ~SuppressionGain(); + + SuppressionGain(const SuppressionGain&) = delete; + SuppressionGain& operator=(const SuppressionGain&) = delete; + void GetGain( rtc::ArrayView> nearend_spectrum, @@ -134,8 +137,6 @@ class SuppressionGain { // echo spectrum. const bool use_unbounded_echo_spectrum_; std::unique_ptr dominant_nearend_detector_; - - RTC_DISALLOW_COPY_AND_ASSIGN(SuppressionGain); }; } // namespace webrtc diff --git a/modules/audio_processing/aec_dump/aec_dump_factory.h b/modules/audio_processing/aec_dump/aec_dump_factory.h index 429a8a5881..c902a58a34 100644 --- a/modules/audio_processing/aec_dump/aec_dump_factory.h +++ b/modules/audio_processing/aec_dump/aec_dump_factory.h @@ -26,10 +26,10 @@ namespace webrtc { class RTC_EXPORT AecDumpFactory { public: - // The |worker_queue| may not be null and must outlive the created + // The `worker_queue` may not be null and must outlive the created // AecDump instance. |max_log_size_bytes == -1| means the log size - // will be unlimited. |handle| may not be null. The AecDump takes - // responsibility for |handle| and closes it in the destructor. A + // will be unlimited. `handle` may not be null. The AecDump takes + // responsibility for `handle` and closes it in the destructor. A // non-null return value indicates that the file has been // sucessfully opened. 
static std::unique_ptr Create(webrtc::FileWrapper file, diff --git a/modules/audio_processing/aec_dump/aec_dump_impl.cc b/modules/audio_processing/aec_dump/aec_dump_impl.cc index db61b36c29..160583e7c3 100644 --- a/modules/audio_processing/aec_dump/aec_dump_impl.cc +++ b/modules/audio_processing/aec_dump/aec_dump_impl.cc @@ -155,7 +155,7 @@ void AecDumpImpl::WriteRenderStreamMessage( audioproc::ReverseStream* msg = event->mutable_reverse_stream(); - for (size_t i = 0; i < src.num_channels(); ++i) { + for (int i = 0; i < src.num_channels(); ++i) { const auto& channel_view = src.channel(i); msg->add_channel(channel_view.begin(), sizeof(float) * channel_view.size()); } @@ -230,7 +230,7 @@ void AecDumpImpl::WriteRuntimeSetting( break; } case AudioProcessing::RuntimeSetting::Type::kNotSpecified: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; } worker_queue_->PostTask(std::move(task)); diff --git a/modules/audio_processing/aec_dump/aec_dump_integration_test.cc b/modules/audio_processing/aec_dump/aec_dump_integration_test.cc index 83268b5994..503135d87f 100644 --- a/modules/audio_processing/aec_dump/aec_dump_integration_test.cc +++ b/modules/audio_processing/aec_dump/aec_dump_integration_test.cc @@ -23,10 +23,9 @@ using ::testing::Exactly; using ::testing::StrictMock; namespace { -std::unique_ptr CreateAudioProcessing() { - webrtc::Config config; - std::unique_ptr apm( - webrtc::AudioProcessingBuilderForTesting().Create(config)); +rtc::scoped_refptr CreateAudioProcessing() { + rtc::scoped_refptr apm( + webrtc::AudioProcessingBuilderForTesting().Create()); RTC_DCHECK(apm); return apm; } @@ -56,8 +55,7 @@ TEST(AecDumpIntegration, constexpr int kNumSamplesPerChannel = kNumSampleRateHz / 100; std::array frame; frame.fill(0.f); - webrtc::StreamConfig stream_config(kNumSampleRateHz, kNumChannels, - /*has_keyboard=*/false); + webrtc::StreamConfig stream_config(kNumSampleRateHz, kNumChannels); EXPECT_CALL(*mock_aec_dump.get(), WriteRenderStreamMessage(_, _, _)) .Times(Exactly(1)); @@ -76,8 +74,7 @@ TEST(AecDumpIntegration, CaptureStreamShouldBeLoggedOnceEveryProcessStream) { std::array frame; frame.fill(0.f); - webrtc::StreamConfig stream_config(kNumSampleRateHz, kNumChannels, - /*has_keyboard=*/false); + webrtc::StreamConfig stream_config(kNumSampleRateHz, kNumChannels); EXPECT_CALL(*mock_aec_dump.get(), AddCaptureStreamInput(_, _, _)) .Times(AtLeast(1)); diff --git a/modules/audio_processing/aec_dump/capture_stream_info.cc b/modules/audio_processing/aec_dump/capture_stream_info.cc index 907cd97793..efdda2570f 100644 --- a/modules/audio_processing/aec_dump/capture_stream_info.cc +++ b/modules/audio_processing/aec_dump/capture_stream_info.cc @@ -23,7 +23,7 @@ void CaptureStreamInfo::AddInput(const AudioFrameView& src) { RTC_DCHECK(task_); auto* stream = task_->GetEvent()->mutable_stream(); - for (size_t i = 0; i < src.num_channels(); ++i) { + for (int i = 0; i < src.num_channels(); ++i) { const auto& channel_view = src.channel(i); stream->add_input_channel(channel_view.begin(), sizeof(float) * channel_view.size()); @@ -34,7 +34,7 @@ void CaptureStreamInfo::AddOutput(const AudioFrameView& src) { RTC_DCHECK(task_); auto* stream = task_->GetEvent()->mutable_stream(); - for (size_t i = 0; i < src.num_channels(); ++i) { + for (int i = 0; i < src.num_channels(); ++i) { const auto& channel_view = src.channel(i); stream->add_output_channel(channel_view.begin(), sizeof(float) * channel_view.size()); diff --git a/modules/audio_processing/aec_dump/write_to_file_task.cc 
b/modules/audio_processing/aec_dump/write_to_file_task.cc index 4839a0927c..cd083d295b 100644 --- a/modules/audio_processing/aec_dump/write_to_file_task.cc +++ b/modules/audio_processing/aec_dump/write_to_file_task.cc @@ -55,10 +55,10 @@ bool WriteToFileTask::Run() { // Write message preceded by its size. if (!debug_file_->Write(&event_byte_size, sizeof(int32_t))) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } if (!debug_file_->Write(event_string.data(), event_string.length())) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } return true; // Delete task from queue at once. } diff --git a/modules/audio_processing/aecm/aecm_core.cc b/modules/audio_processing/aecm/aecm_core.cc index 78c013323a..fbc3239732 100644 --- a/modules/audio_processing/aecm/aecm_core.cc +++ b/modules/audio_processing/aecm/aecm_core.cc @@ -124,7 +124,7 @@ const int16_t WebRtcAecm_kSinTable[] = { -1140, -998, -856, -713, -571, -428, -285, -142}; -// Moves the pointer to the next entry and inserts |far_spectrum| and +// Moves the pointer to the next entry and inserts `far_spectrum` and // corresponding Q-domain in its buffer. // // Inputs: @@ -574,7 +574,7 @@ int WebRtcAecm_ProcessFrame(AecmCore* aecm, // Obtain an output frame. WebRtc_ReadBuffer(aecm->outFrameBuf, (void**)&out_ptr, out, FRAME_LEN); if (out_ptr != out) { - // ReadBuffer() hasn't copied to |out| in this case. + // ReadBuffer() hasn't copied to `out` in this case. memcpy(out, out_ptr, FRAME_LEN * sizeof(int16_t)); } @@ -616,22 +616,22 @@ int16_t WebRtcAecm_AsymFilt(const int16_t filtOld, // ExtractFractionPart(a, zeros) // -// returns the fraction part of |a|, with |zeros| number of leading zeros, as an -// int16_t scaled to Q8. There is no sanity check of |a| in the sense that the +// returns the fraction part of `a`, with `zeros` number of leading zeros, as an +// int16_t scaled to Q8. There is no sanity check of `a` in the sense that the // number of zeros match. static int16_t ExtractFractionPart(uint32_t a, int zeros) { return (int16_t)(((a << zeros) & 0x7FFFFFFF) >> 23); } -// Calculates and returns the log of |energy| in Q8. The input |energy| is -// supposed to be in Q(|q_domain|). +// Calculates and returns the log of `energy` in Q8. The input `energy` is +// supposed to be in Q(`q_domain`). static int16_t LogOfEnergyInQ8(uint32_t energy, int q_domain) { static const int16_t kLogLowValue = PART_LEN_SHIFT << 7; int16_t log_energy_q8 = kLogLowValue; if (energy > 0) { int zeros = WebRtcSpl_NormU32(energy); int16_t frac = ExtractFractionPart(energy, zeros); - // log2 of |energy| in Q8. + // log2 of `energy` in Q8. log_energy_q8 += ((31 - zeros) << 8) + frac - (q_domain << 8); } return log_energy_q8; diff --git a/modules/audio_processing/aecm/aecm_core.h b/modules/audio_processing/aecm/aecm_core.h index aaa74e1721..3de49315c4 100644 --- a/modules/audio_processing/aecm/aecm_core.h +++ b/modules/audio_processing/aecm/aecm_core.h @@ -58,7 +58,7 @@ typedef struct { void* delay_estimator; uint16_t currentDelay; // Far end history variables - // TODO(bjornv): Replace |far_history| with ring_buffer. + // TODO(bjornv): Replace `far_history` with ring_buffer. 
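The fixed-point LogOfEnergyInQ8() touched earlier in aecm_core.cc computes log2(energy) in Q8, offset by kLogLowValue and compensated by the input's Q domain. A floating-point reference for intuition, assuming PART_LEN_SHIFT == 7 as in the AECM defines (the production code stays in integer arithmetic):

#include <cmath>
#include <cstdint>

int16_t LogOfEnergyInQ8Reference(uint32_t energy, int q_domain) {
  constexpr int kPartLenShift = 7;  // Assumed value of PART_LEN_SHIFT.
  constexpr int kLogLowValueQ8 = kPartLenShift << 7;
  if (energy == 0) {
    return static_cast<int16_t>(kLogLowValueQ8);
  }
  // ((31 - zeros) << 8) + frac in the fixed-point code approximates
  // 256 * log2(energy); the Q domain is subtracted in the same Q8 scale.
  const double log2_energy = std::log2(static_cast<double>(energy));
  return static_cast<int16_t>(kLogLowValueQ8 +
                              256.0 * (log2_energy - q_domain));
}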
uint16_t far_history[PART_LEN1 * MAX_DELAY]; int far_history_pos; int far_q_domains[MAX_DELAY]; @@ -248,7 +248,7 @@ int WebRtcAecm_ProcessBlock(AecmCore* aecm, // void WebRtcAecm_BufferFarFrame(AecmCore* const aecm, const int16_t* const farend, - const int farLen); + int farLen); //////////////////////////////////////////////////////////////////////////////// // WebRtcAecm_FetchFarFrame() @@ -263,15 +263,15 @@ void WebRtcAecm_BufferFarFrame(AecmCore* const aecm, // void WebRtcAecm_FetchFarFrame(AecmCore* const aecm, int16_t* const farend, - const int farLen, - const int knownDelay); + int farLen, + int knownDelay); // All the functions below are intended to be private //////////////////////////////////////////////////////////////////////////////// // WebRtcAecm_UpdateFarHistory() // -// Moves the pointer to the next entry and inserts |far_spectrum| and +// Moves the pointer to the next entry and inserts `far_spectrum` and // corresponding Q-domain in its buffer. // // Inputs: @@ -339,8 +339,8 @@ int16_t WebRtcAecm_CalcSuppressionGain(AecmCore* const aecm); // void WebRtcAecm_CalcEnergies(AecmCore* aecm, const uint16_t* far_spectrum, - const int16_t far_q, - const uint32_t nearEner, + int16_t far_q, + uint32_t nearEner, int32_t* echoEst); /////////////////////////////////////////////////////////////////////////////// @@ -374,9 +374,9 @@ int16_t WebRtcAecm_CalcStepSize(AecmCore* const aecm); // void WebRtcAecm_UpdateChannel(AecmCore* aecm, const uint16_t* far_spectrum, - const int16_t far_q, + int16_t far_q, const uint16_t* const dfa, - const int16_t mu, + int16_t mu, int32_t* echoEst); extern const int16_t WebRtcAecm_kCosTable[]; diff --git a/modules/audio_processing/aecm/aecm_core_c.cc b/modules/audio_processing/aecm/aecm_core_c.cc index 7b6ca59371..d363dd2cfd 100644 --- a/modules/audio_processing/aecm/aecm_core_c.cc +++ b/modules/audio_processing/aecm/aecm_core_c.cc @@ -98,7 +98,7 @@ static void ComfortNoise(AecmCore* aecm, // Track the minimum. if (aecm->noiseEst[i] < (1 << minTrackShift)) { // For small values, decrease noiseEst[i] every - // |kNoiseEstIncCount| block. The regular approach below can not + // `kNoiseEstIncCount` block. The regular approach below can not // go further down due to truncation. aecm->noiseEstTooHighCtr[i]++; if (aecm->noiseEstTooHighCtr[i] >= kNoiseEstIncCount) { @@ -125,7 +125,7 @@ static void ComfortNoise(AecmCore* aecm, aecm->noiseEst[i] >>= 11; } else { // Make incremental increases based on size every - // |kNoiseEstIncCount| block + // `kNoiseEstIncCount` block aecm->noiseEstTooLowCtr[i]++; if (aecm->noiseEstTooLowCtr[i] >= kNoiseEstIncCount) { aecm->noiseEst[i] += (aecm->noiseEst[i] >> 9) + 1; @@ -181,7 +181,7 @@ static void WindowAndFFT(AecmCore* aecm, // FFT of signal for (i = 0; i < PART_LEN; i++) { // Window time domain signal and insert into real part of - // transformation array |fft| + // transformation array `fft` int16_t scaled_time_signal = time_signal[i] * (1 << time_signal_scaling); fft[i] = (int16_t)((scaled_time_signal * WebRtcAecm_kSqrtHanning[i]) >> 14); scaled_time_signal = time_signal[i + PART_LEN] * (1 << time_signal_scaling); @@ -204,8 +204,8 @@ static void InverseFFTAndWindow(AecmCore* aecm, const int16_t* nearendClean) { int i, j, outCFFT; int32_t tmp32no1; - // Reuse |efw| for the inverse FFT output after transferring - // the contents to |fft|. + // Reuse `efw` for the inverse FFT output after transferring + // the contents to `fft`. 
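The "magn ~= alpha * max + beta * min" comments corrected in these hunks refer to the classic alpha-max-plus-beta-min magnitude approximation. A floating-point reference using the standard max-error-minimizing constants; the Q15 constants in the AECM code may differ slightly:

#include <algorithm>
#include <cmath>

// Approximates |re + j*im| without a square root:
// |z| ~= alpha * max(|re|, |im|) + beta * min(|re|, |im|).
float ApproxMagnitude(float re, float im) {
  constexpr float kAlpha = 0.960434f;  // Classic minimax alpha/beta pair;
  constexpr float kBeta = 0.397825f;   // assumed, not AECM's exact Q15 values.
  const float abs_re = std::fabs(re);
  const float abs_im = std::fabs(im);
  return kAlpha * std::max(abs_re, abs_im) + kBeta * std::min(abs_re, abs_im);
}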
int16_t* ifft_out = (int16_t*)efw; // Synthesis @@ -312,7 +312,7 @@ static int TimeToFrequencyDomain(AecmCore* aecm, } else { // Approximation for magnitude of complex fft output // magn = sqrt(real^2 + imag^2) - // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|) + // magn ~= alpha * max(`imag`,`real`) + beta * min(`imag`,`real`) // // The parameters alpha and beta are stored in Q15 @@ -541,7 +541,7 @@ int RTC_NO_SANITIZE("signed-integer-overflow") // bugs.webrtc.org/8200 } zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]); - RTC_DCHECK_GE(zeros16, 0); // |zeros16| is a norm, hence non-negative. + RTC_DCHECK_GE(zeros16, 0); // `zeros16` is a norm, hence non-negative. dfa_clean_q_domain_diff = aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld; if (zeros16 < dfa_clean_q_domain_diff && aecm->nearFilt[i]) { tmp16no1 = aecm->nearFilt[i] * (1 << zeros16); diff --git a/modules/audio_processing/aecm/aecm_core_mips.cc b/modules/audio_processing/aecm/aecm_core_mips.cc index f2f43e1146..828aa6d2fb 100644 --- a/modules/audio_processing/aecm/aecm_core_mips.cc +++ b/modules/audio_processing/aecm/aecm_core_mips.cc @@ -822,7 +822,7 @@ static int TimeToFrequencyDomain(AecmCore* aecm, } else { // Approximation for magnitude of complex fft output // magn = sqrt(real^2 + imag^2) - // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|) + // magn ~= alpha * max(`imag`,`real`) + beta * min(`imag`,`real`) // // The parameters alpha and beta are stored in Q15 tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real); @@ -1106,7 +1106,7 @@ int WebRtcAecm_ProcessBlock(AecmCore* aecm, } zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]); - RTC_DCHECK_GE(zeros16, 0); // |zeros16| is a norm, hence non-negative. + RTC_DCHECK_GE(zeros16, 0); // `zeros16` is a norm, hence non-negative. dfa_clean_q_domain_diff = aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld; if (zeros16 < dfa_clean_q_domain_diff && aecm->nearFilt[i]) { tmp16no1 = aecm->nearFilt[i] << zeros16; @@ -1411,7 +1411,7 @@ static void ComfortNoise(AecmCore* aecm, // Track the minimum. if (tnoise < (1 << minTrackShift)) { // For small values, decrease noiseEst[i] every - // |kNoiseEstIncCount| block. The regular approach below can not + // `kNoiseEstIncCount` block. The regular approach below can not // go further down due to truncation. aecm->noiseEstTooHighCtr[i]++; if (aecm->noiseEstTooHighCtr[i] >= kNoiseEstIncCount) { @@ -1442,7 +1442,7 @@ static void ComfortNoise(AecmCore* aecm, : "hi", "lo"); } else { // Make incremental increases based on size every - // |kNoiseEstIncCount| block + // `kNoiseEstIncCount` block aecm->noiseEstTooLowCtr[i]++; if (aecm->noiseEstTooLowCtr[i] >= kNoiseEstIncCount) { __asm __volatile( @@ -1484,7 +1484,7 @@ static void ComfortNoise(AecmCore* aecm, // Track the minimum. if (tnoise1 < (1 << minTrackShift)) { // For small values, decrease noiseEst[i] every - // |kNoiseEstIncCount| block. The regular approach below can not + // `kNoiseEstIncCount` block. The regular approach below can not // go further down due to truncation. 
aecm->noiseEstTooHighCtr[i + 1]++; if (aecm->noiseEstTooHighCtr[i + 1] >= kNoiseEstIncCount) { @@ -1515,7 +1515,7 @@ static void ComfortNoise(AecmCore* aecm, : "hi", "lo"); } else { // Make incremental increases based on size every - // |kNoiseEstIncCount| block + // `kNoiseEstIncCount` block aecm->noiseEstTooLowCtr[i + 1]++; if (aecm->noiseEstTooLowCtr[i + 1] >= kNoiseEstIncCount) { __asm __volatile( diff --git a/modules/audio_processing/agc/BUILD.gn b/modules/audio_processing/agc/BUILD.gn index 4bb8c5494b..eef1b77560 100644 --- a/modules/audio_processing/agc/BUILD.gn +++ b/modules/audio_processing/agc/BUILD.gn @@ -27,6 +27,7 @@ rtc_library("agc") { "..:apm_logging", "..:audio_buffer", "..:audio_frame_view", + "../../../api:array_view", "../../../common_audio", "../../../common_audio:common_audio_c", "../../../rtc_base:checks", @@ -41,6 +42,20 @@ rtc_library("agc") { absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } +rtc_library("analog_gain_stats_reporter") { + sources = [ + "analog_gain_stats_reporter.cc", + "analog_gain_stats_reporter.h", + ] + deps = [ + "../../../rtc_base:gtest_prod", + "../../../rtc_base:logging", + "../../../rtc_base:safe_minmax", + "../../../system_wrappers:metrics", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + rtc_library("clipping_predictor") { sources = [ "clipping_predictor.cc", @@ -94,6 +109,7 @@ rtc_library("level_estimation") { "utility.h", ] deps = [ + "../../../api:array_view", "../../../rtc_base:checks", "../vad", ] @@ -142,6 +158,7 @@ if (rtc_include_tests) { testonly = true sources = [ "agc_manager_direct_unittest.cc", + "analog_gain_stats_reporter_unittest.cc", "clipping_predictor_evaluator_unittest.cc", "clipping_predictor_level_buffer_unittest.cc", "clipping_predictor_unittest.cc", @@ -152,15 +169,19 @@ if (rtc_include_tests) { deps = [ ":agc", + ":analog_gain_stats_reporter", ":clipping_predictor", ":clipping_predictor_evaluator", ":clipping_predictor_level_buffer", ":gain_control_interface", ":level_estimation", "..:mocks", + "../../../api:array_view", "../../../rtc_base:checks", "../../../rtc_base:rtc_base_approved", "../../../rtc_base:safe_conversions", + "../../../rtc_base:stringutils", + "../../../system_wrappers:metrics", "../../../test:field_trial", "../../../test:fileutils", "../../../test:test_support", diff --git a/modules/audio_processing/agc/agc.cc b/modules/audio_processing/agc/agc.cc index a89ae111ea..a018ff9f93 100644 --- a/modules/audio_processing/agc/agc.cc +++ b/modules/audio_processing/agc/agc.cc @@ -21,9 +21,11 @@ namespace webrtc { namespace { -const int kDefaultLevelDbfs = -18; -const int kNumAnalysisFrames = 100; -const double kActivityThreshold = 0.3; +constexpr int kDefaultLevelDbfs = -18; +constexpr int kNumAnalysisFrames = 100; +constexpr double kActivityThreshold = 0.3; +constexpr int kNum10msFramesInOneSecond = 100; +constexpr int kMaxSampleRateHz = 384000; } // namespace @@ -35,8 +37,10 @@ Agc::Agc() Agc::~Agc() = default; -void Agc::Process(const int16_t* audio, size_t length, int sample_rate_hz) { - vad_.ProcessChunk(audio, length, sample_rate_hz); +void Agc::Process(rtc::ArrayView audio) { + const int sample_rate_hz = audio.size() * kNum10msFramesInOneSecond; + RTC_DCHECK_LE(sample_rate_hz, kMaxSampleRateHz); + vad_.ProcessChunk(audio.data(), audio.size(), sample_rate_hz); const std::vector& rms = vad_.chunkwise_rms(); const std::vector& probabilities = vad_.chunkwise_voice_probabilities(); @@ -48,7 +52,7 @@ void Agc::Process(const int16_t* audio, size_t length, int 
sample_rate_hz) { bool Agc::GetRmsErrorDb(int* error) { if (!error) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return false; } diff --git a/modules/audio_processing/agc/agc.h b/modules/audio_processing/agc/agc.h index b9bd5ea07b..da42808225 100644 --- a/modules/audio_processing/agc/agc.h +++ b/modules/audio_processing/agc/agc.h @@ -13,6 +13,7 @@ #include +#include "api/array_view.h" #include "modules/audio_processing/vad/voice_activity_detector.h" namespace webrtc { @@ -24,13 +25,13 @@ class Agc { Agc(); virtual ~Agc(); - // |audio| must be mono; in a multi-channel stream, provide the first (usually + // `audio` must be mono; in a multi-channel stream, provide the first (usually // left) channel. - virtual void Process(const int16_t* audio, size_t length, int sample_rate_hz); + virtual void Process(rtc::ArrayView audio); // Retrieves the difference between the target RMS level and the current // signal RMS level in dB. Returns true if an update is available and false - // otherwise, in which case |error| should be ignored and no action taken. + // otherwise, in which case `error` should be ignored and no action taken. virtual bool GetRmsErrorDb(int* error); virtual void Reset(); diff --git a/modules/audio_processing/agc/agc_manager_direct.cc b/modules/audio_processing/agc/agc_manager_direct.cc index e2a5b998a4..8bce7690a3 100644 --- a/modules/audio_processing/agc/agc_manager_direct.cc +++ b/modules/audio_processing/agc/agc_manager_direct.cc @@ -13,6 +13,7 @@ #include #include +#include "api/array_view.h" #include "common_audio/include/audio_util.h" #include "modules/audio_processing/agc/gain_control.h" #include "modules/audio_processing/agc/gain_map_internal.h" @@ -62,28 +63,27 @@ bool UseMaxAnalogChannelLevel() { return field_trial::IsEnabled("WebRTC-UseMaxAnalogAgcChannelLevel"); } -// Returns kMinMicLevel if no field trial exists or if it has been disabled. -// Returns a value between 0 and 255 depending on the field-trial string. -// Example: 'WebRTC-Audio-AgcMinMicLevelExperiment/Enabled-80' => returns 80. -int GetMinMicLevel() { - RTC_LOG(LS_INFO) << "[agc] GetMinMicLevel"; +// If the "WebRTC-Audio-AgcMinMicLevelExperiment" field trial is specified, +// parses it and returns a value between 0 and 255 depending on the field-trial +// string. Returns an unspecified value if the field trial is not specified, if +// disabled or if it cannot be parsed. Example: +// 'WebRTC-Audio-AgcMinMicLevelExperiment/Enabled-80' => returns 80. 
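// Hypothetical usage of the override lookup implemented just below, showing
// how a caller falls back to the default level when the field trial is absent
// or invalid (mirrors the `value_or(kMinMicLevel)` call later in this file;
// the helper name is illustrative, not production code):
#include "absl/types/optional.h"

int ResolveMinMicLevel(absl::optional<int> override_value, int default_level) {
  // E.g. "Enabled-80" yields 80; "Disabled" or a bad payload yields the
  // default (kMinMicLevel in this file).
  return override_value.value_or(default_level);
}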
+absl::optional GetMinMicLevelOverride() { constexpr char kMinMicLevelFieldTrial[] = "WebRTC-Audio-AgcMinMicLevelExperiment"; if (!webrtc::field_trial::IsEnabled(kMinMicLevelFieldTrial)) { - RTC_LOG(LS_INFO) << "[agc] Using default min mic level: " << kMinMicLevel; - return kMinMicLevel; + return absl::nullopt; } const auto field_trial_string = webrtc::field_trial::FindFullName(kMinMicLevelFieldTrial); int min_mic_level = -1; sscanf(field_trial_string.c_str(), "Enabled-%d", &min_mic_level); if (min_mic_level >= 0 && min_mic_level <= 255) { - RTC_LOG(LS_INFO) << "[agc] Experimental min mic level: " << min_mic_level; return min_mic_level; } else { RTC_LOG(LS_WARNING) << "[agc] Invalid parameter for " << kMinMicLevelFieldTrial << ", ignored."; - return kMinMicLevel; + return absl::nullopt; } } @@ -124,7 +124,7 @@ float ComputeClippedRatio(const float* const* audio, int num_clipped_in_ch = 0; for (size_t i = 0; i < samples_per_channel; ++i) { RTC_DCHECK(audio[ch]); - if (audio[ch][i] >= 32767.f || audio[ch][i] <= -32768.f) { + if (audio[ch][i] >= 32767.0f || audio[ch][i] <= -32768.0f) { ++num_clipped_in_ch; } } @@ -134,36 +134,43 @@ float ComputeClippedRatio(const float* const* audio, } void LogClippingPredictorMetrics(const ClippingPredictorEvaluator& evaluator) { - RTC_LOG(LS_INFO) << "Clipping predictor metrics: TP " - << evaluator.true_positives() << " TN " - << evaluator.true_negatives() << " FP " - << evaluator.false_positives() << " FN " - << evaluator.false_negatives(); - const float precision_denominator = - evaluator.true_positives() + evaluator.false_positives(); - const float recall_denominator = - evaluator.true_positives() + evaluator.false_negatives(); - if (precision_denominator > 0 && recall_denominator > 0) { - const float precision = evaluator.true_positives() / precision_denominator; - const float recall = evaluator.true_positives() / recall_denominator; - RTC_LOG(LS_INFO) << "Clipping predictor metrics: P " << precision << " R " - << recall; - const float f1_score_denominator = precision + recall; - if (f1_score_denominator > 0.0f) { - const float f1_score = 2 * precision * recall / f1_score_denominator; - RTC_LOG(LS_INFO) << "Clipping predictor metrics: F1 " << f1_score; - RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc.ClippingPredictor.F1Score", - std::round(f1_score * 100.0f), /*min=*/0, - /*max=*/100, - /*bucket_count=*/50); - } + absl::optional metrics = + ComputeClippingPredictionMetrics(evaluator.counters()); + if (metrics.has_value()) { + RTC_LOG(LS_INFO) << "Clipping predictor metrics: P " << metrics->precision + << " R " << metrics->recall << " F1 score " + << metrics->f1_score; + RTC_DCHECK_GE(metrics->f1_score, 0.0f); + RTC_DCHECK_LE(metrics->f1_score, 1.0f); + RTC_DCHECK_GE(metrics->precision, 0.0f); + RTC_DCHECK_LE(metrics->precision, 1.0f); + RTC_DCHECK_GE(metrics->recall, 0.0f); + RTC_DCHECK_LE(metrics->recall, 1.0f); + RTC_HISTOGRAM_COUNTS_LINEAR( + /*name=*/"WebRTC.Audio.Agc.ClippingPredictor.F1Score", + /*sample=*/std::round(metrics->f1_score * 100.0f), + /*min=*/0, + /*max=*/100, + /*bucket_count=*/50); + RTC_HISTOGRAM_COUNTS_LINEAR( + /*name=*/"WebRTC.Audio.Agc.ClippingPredictor.Precision", + /*sample=*/std::round(metrics->precision * 100.0f), + /*min=*/0, + /*max=*/100, + /*bucket_count=*/50); + RTC_HISTOGRAM_COUNTS_LINEAR( + /*name=*/"WebRTC.Audio.Agc.ClippingPredictor.Recall", + /*sample=*/std::round(metrics->recall * 100.0f), + /*min=*/0, + /*max=*/100, + /*bucket_count=*/50); } } void LogClippingMetrics(int clipping_rate) { RTC_LOG(LS_INFO) << 
"Input clipping rate: " << clipping_rate << "%"; - RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc.InputClippingRate", - clipping_rate, /*min=*/0, /*max=*/100, + RTC_HISTOGRAM_COUNTS_LINEAR(/*name=*/"WebRTC.Audio.Agc.InputClippingRate", + /*sample=*/clipping_rate, /*min=*/0, /*max=*/100, /*bucket_count=*/50); } @@ -197,9 +204,7 @@ void MonoAgc::Initialize() { check_volume_on_next_process_ = true; } -void MonoAgc::Process(const int16_t* audio, - size_t samples_per_channel, - int sample_rate_hz) { +void MonoAgc::Process(rtc::ArrayView audio) { new_compression_to_set_ = absl::nullopt; if (check_volume_on_next_process_) { @@ -209,7 +214,7 @@ void MonoAgc::Process(const int16_t* audio, CheckVolumeAndReset(); } - agc_->Process(audio, samples_per_channel, sample_rate_hz); + agc_->Process(audio); UpdateGain(); if (!disable_digital_adaptive_) { @@ -280,7 +285,7 @@ void MonoAgc::SetLevel(int new_level) { void MonoAgc::SetMaxLevel(int level) { RTC_DCHECK_GE(level, clipped_level_min_); max_level_ = level; - // Scale the |kSurplusCompressionGain| linearly across the restricted + // Scale the `kSurplusCompressionGain` linearly across the restricted // level range. max_compression_gain_ = kMaxCompressionGain + std::floor((1.f * kMaxMicLevel - max_level_) / @@ -307,7 +312,7 @@ int MonoAgc::CheckVolumeAndReset() { int level = stream_analog_level_; // Reasons for taking action at startup: // 1) A person starting a call is expected to be heard. - // 2) Independent of interpretation of |level| == 0 we should raise it so the + // 2) Independent of interpretation of `level` == 0 we should raise it so the // AGC can do its job properly. if (level == 0 && !startup_) { RTC_DLOG(LS_INFO) @@ -440,7 +445,6 @@ AgcManagerDirect::AgcManagerDirect( Agc* agc, int startup_min_level, int clipped_level_min, - int sample_rate_hz, int clipped_level_step, float clipped_ratio_threshold, int clipped_wait_frames, @@ -449,7 +453,6 @@ AgcManagerDirect::AgcManagerDirect( startup_min_level, clipped_level_min, /*disable_digital_adaptive*/ false, - sample_rate_hz, clipped_level_step, clipped_ratio_threshold, clipped_wait_frames, @@ -464,15 +467,14 @@ AgcManagerDirect::AgcManagerDirect( int startup_min_level, int clipped_level_min, bool disable_digital_adaptive, - int sample_rate_hz, int clipped_level_step, float clipped_ratio_threshold, int clipped_wait_frames, const ClippingPredictorConfig& clipping_config) - : data_dumper_( + : min_mic_level_override_(GetMinMicLevelOverride()), + data_dumper_( new ApmDataDumper(rtc::AtomicOps::Increment(&instance_counter_))), use_min_channel_level_(!UseMaxAnalogChannelLevel()), - sample_rate_hz_(sample_rate_hz), num_capture_channels_(num_capture_channels), disable_digital_adaptive_(disable_digital_adaptive), frames_since_clipped_(clipped_wait_frames), @@ -490,7 +492,11 @@ AgcManagerDirect::AgcManagerDirect( clipping_predictor_log_counter_(0), clipping_rate_log_(0.0f), clipping_rate_log_counter_(0) { - const int min_mic_level = GetMinMicLevel(); + const int min_mic_level = min_mic_level_override_.value_or(kMinMicLevel); + RTC_LOG(LS_INFO) << "[agc] Min mic level: " << min_mic_level + << " (overridden: " + << (min_mic_level_override_.has_value() ? "yes" : "no") + << ")"; for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) { ApmDataDumper* data_dumper_ch = ch == 0 ? 
data_dumper_.get() : nullptr; @@ -597,7 +603,7 @@ void AgcManagerDirect::AnalyzePreProcess(const float* const* audio, const auto step = clipping_predictor_->EstimateClippedLevelStep( channel, stream_analog_level_, clipped_level_step_, channel_agcs_[channel]->min_mic_level(), kMaxMicLevel); - if (use_clipping_predictor_step_ && step.has_value()) { + if (step.has_value()) { predicted_step = std::max(predicted_step, step.value()); clipping_predicted = true; } @@ -618,50 +624,47 @@ void AgcManagerDirect::AnalyzePreProcess(const float* const* audio, clipping_predictor_log_counter_ = 0; } } - if (clipping_detected || clipping_predicted) { - int step = clipped_level_step_; - if (clipping_detected) { - RTC_DLOG(LS_INFO) << "[agc] Clipping detected. clipped_ratio=" - << clipped_ratio; - } - if (clipping_predicted) { - step = std::max(predicted_step, clipped_level_step_); - RTC_DLOG(LS_INFO) << "[agc] Clipping predicted. step=" << step; + if (clipping_detected) { + RTC_DLOG(LS_INFO) << "[agc] Clipping detected. clipped_ratio=" + << clipped_ratio; + } + int step = clipped_level_step_; + if (clipping_predicted) { + predicted_step = std::max(predicted_step, clipped_level_step_); + RTC_DLOG(LS_INFO) << "[agc] Clipping predicted. step=" << predicted_step; + if (use_clipping_predictor_step_) { + step = predicted_step; } + } + if (clipping_detected || + (clipping_predicted && use_clipping_predictor_step_)) { for (auto& state_ch : channel_agcs_) { state_ch->HandleClipping(step); } frames_since_clipped_ = 0; if (!!clipping_predictor_) { clipping_predictor_->Reset(); - clipping_predictor_evaluator_.Reset(); + clipping_predictor_evaluator_.RemoveExpectations(); } } AggregateChannelLevels(); } void AgcManagerDirect::Process(const AudioBuffer* audio) { + RTC_DCHECK(audio); AggregateChannelLevels(); if (!capture_output_used_) { return; } + const size_t num_frames_per_band = audio->num_frames_per_band(); for (size_t ch = 0; ch < channel_agcs_.size(); ++ch) { - int16_t* audio_use = nullptr; std::array audio_data; - int num_frames_per_band; - if (audio) { - FloatS16ToS16(audio->split_bands_const_f(ch)[0], - audio->num_frames_per_band(), audio_data.data()); - audio_use = audio_data.data(); - num_frames_per_band = audio->num_frames_per_band(); - } else { - // Only used for testing. - // TODO(peah): Change unittests to only allow on non-null audio input. 
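// Self-contained sketch of the FloatS16ToS16() conversion now done
// unconditionally in AgcManagerDirect::Process(): the AudioBuffer holds
// samples as floats in the S16 range, while the per-channel AGC consumes
// int16_t. The real helper lives in common_audio/include/audio_util.h; this
// version only illustrates the clamp-and-round idea.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>

void FloatS16ToS16Sketch(const float* src, size_t length, int16_t* dst) {
  for (size_t i = 0; i < length; ++i) {
    // Clamp to the valid S16 range, then round to the nearest integer.
    const float clamped = std::min(32767.0f, std::max(-32768.0f, src[i]));
    dst[i] = static_cast<int16_t>(std::lround(clamped));
  }
}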
- num_frames_per_band = 320; - } - channel_agcs_[ch]->Process(audio_use, num_frames_per_band, sample_rate_hz_); + int16_t* audio_use = audio_data.data(); + FloatS16ToS16(audio->split_bands_const_f(ch)[0], num_frames_per_band, + audio_use); + channel_agcs_[ch]->Process({audio_use, num_frames_per_band}); new_compressions_to_set_[ch] = channel_agcs_[ch]->new_compression(); } @@ -716,6 +719,10 @@ void AgcManagerDirect::AggregateChannelLevels() { } } } + if (min_mic_level_override_.has_value()) { + stream_analog_level_ = + std::max(stream_analog_level_, *min_mic_level_override_); + } } } // namespace webrtc diff --git a/modules/audio_processing/agc/agc_manager_direct.h b/modules/audio_processing/agc/agc_manager_direct.h index d80a255ced..ce67a971b4 100644 --- a/modules/audio_processing/agc/agc_manager_direct.h +++ b/modules/audio_processing/agc/agc_manager_direct.h @@ -14,6 +14,7 @@ #include #include "absl/types/optional.h" +#include "api/array_view.h" #include "modules/audio_processing/agc/agc.h" #include "modules/audio_processing/agc/clipping_predictor.h" #include "modules/audio_processing/agc/clipping_predictor_evaluator.h" @@ -47,7 +48,6 @@ class AgcManagerDirect final { int startup_min_level, int clipped_level_min, bool disable_digital_adaptive, - int sample_rate_hz, int clipped_level_step, float clipped_ratio_threshold, int clipped_wait_frames, @@ -72,7 +72,6 @@ class AgcManagerDirect final { int stream_analog_level() const { return stream_analog_level_; } void set_stream_analog_level(int level); int num_channels() const { return num_capture_channels_; } - int sample_rate_hz() const { return sample_rate_hz_; } // If available, returns a new compression gain for the digital gain control. absl::optional GetDigitalComressionGain(); @@ -106,19 +105,17 @@ class AgcManagerDirect final { ClippingParametersVerified); FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectStandaloneTest, DisableClippingPredictorDoesNotLowerVolume); - FRIEND_TEST_ALL_PREFIXES( - AgcManagerDirectStandaloneTest, - EnableClippingPredictorWithUnusedPredictedStepDoesNotLowerVolume); FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectStandaloneTest, - EnableClippingPredictorLowersVolume); + UsedClippingPredictionsProduceLowerAnalogLevels); + FRIEND_TEST_ALL_PREFIXES(AgcManagerDirectStandaloneTest, + UnusedClippingPredictionsProduceEqualAnalogLevels); - // Dependency injection for testing. Don't delete |agc| as the memory is owned + // Dependency injection for testing. Don't delete `agc` as the memory is owned // by the manager. AgcManagerDirect( Agc* agc, int startup_min_level, int clipped_level_min, - int sample_rate_hz, int clipped_level_step, float clipped_ratio_threshold, int clipped_wait_frames, @@ -129,10 +126,10 @@ class AgcManagerDirect final { void AggregateChannelLevels(); + const absl::optional min_mic_level_override_; std::unique_ptr data_dumper_; static int instance_counter_; const bool use_min_channel_level_; - const int sample_rate_hz_; const int num_capture_channels_; const bool disable_digital_adaptive_; @@ -172,9 +169,7 @@ class MonoAgc { void HandleClipping(int clipped_level_step); - void Process(const int16_t* audio, - size_t samples_per_channel, - int sample_rate_hz); + void Process(rtc::ArrayView audio); void set_stream_analog_level(int level) { stream_analog_level_ = level; } int stream_analog_level() const { return stream_analog_level_; } @@ -196,7 +191,7 @@ class MonoAgc { // Set the maximum level the AGC is allowed to apply. Also updates the // maximum compression gain to compensate. 
The level must be at least - // |kClippedLevelMin|. + // `kClippedLevelMin`. void SetMaxLevel(int level); int CheckVolumeAndReset(); diff --git a/modules/audio_processing/agc/agc_manager_direct_unittest.cc b/modules/audio_processing/agc/agc_manager_direct_unittest.cc index bb284f9abc..d727449229 100644 --- a/modules/audio_processing/agc/agc_manager_direct_unittest.cc +++ b/modules/audio_processing/agc/agc_manager_direct_unittest.cc @@ -10,9 +10,12 @@ #include "modules/audio_processing/agc/agc_manager_direct.h" +#include + #include "modules/audio_processing/agc/gain_control.h" #include "modules/audio_processing/agc/mock_agc.h" #include "modules/audio_processing/include/mock_audio_processing.h" +#include "rtc_base/strings/string_builder.h" #include "test/field_trial.h" #include "test/gmock.h" #include "test/gtest.h" @@ -37,6 +40,9 @@ constexpr int kClippedLevelStep = 15; constexpr float kClippedRatioThreshold = 0.1f; constexpr int kClippedWaitFrames = 300; +constexpr AudioProcessing::Config::GainController1::AnalogGainController + kDefaultAnalogConfig{}; + using ClippingPredictorConfig = AudioProcessing::Config::GainController1:: AnalogGainController::ClippingPredictor; @@ -69,8 +75,9 @@ std::unique_ptr CreateAgcManagerDirect( int clipped_wait_frames) { return std::make_unique( /*num_capture_channels=*/1, startup_min_level, kClippedMin, - /*disable_digital_adaptive=*/true, kSampleRateHz, clipped_level_step, - clipped_ratio_threshold, clipped_wait_frames, ClippingPredictorConfig()); + /*disable_digital_adaptive=*/true, clipped_level_step, + clipped_ratio_threshold, clipped_wait_frames, + kDefaultAnalogConfig.clipping_predictor); } std::unique_ptr CreateAgcManagerDirect( @@ -81,30 +88,39 @@ std::unique_ptr CreateAgcManagerDirect( const ClippingPredictorConfig& clipping_cfg) { return std::make_unique( /*num_capture_channels=*/1, startup_min_level, kClippedMin, - /*disable_digital_adaptive=*/true, kSampleRateHz, clipped_level_step, + /*disable_digital_adaptive=*/true, clipped_level_step, clipped_ratio_threshold, clipped_wait_frames, clipping_cfg); } +// Calls `AnalyzePreProcess()` on `manager` `num_calls` times. `peak_ratio` is a +// value in [0, 1] which determines the amplitude of the samples (1 maps to full +// scale). The first half of the calls is made on frames which are half filled +// with zeros in order to simulate a signal with different crest factors. void CallPreProcessAudioBuffer(int num_calls, float peak_ratio, AgcManagerDirect& manager) { - RTC_DCHECK_GE(1.f, peak_ratio); + RTC_DCHECK_LE(peak_ratio, 1.0f); AudioBuffer audio_buffer(kSampleRateHz, 1, kSampleRateHz, 1, kSampleRateHz, 1); const int num_channels = audio_buffer.num_channels(); const int num_frames = audio_buffer.num_frames(); + + // Make half of the calls with half zeroed frames. for (int ch = 0; ch < num_channels; ++ch) { + // 50% of the samples in one frame are zero. for (int i = 0; i < num_frames; i += 2) { - audio_buffer.channels()[ch][i] = peak_ratio * 32767.f; + audio_buffer.channels()[ch][i] = peak_ratio * 32767.0f; audio_buffer.channels()[ch][i + 1] = 0.0f; } } for (int n = 0; n < num_calls / 2; ++n) { manager.AnalyzePreProcess(&audio_buffer); } + + // Make the remaining half of the calls with frames whose samples are all set. 
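// Why the half-zeroed frames above change the crest factor: zeroing every
// other sample keeps the peak but halves the power, so the peak-to-RMS ratio
// grows by sqrt(2) (~3 dB). A standalone check of that arithmetic:
#include <cmath>
#include <cstdio>

int main() {
  const double peak = 32767.0;
  const double rms_full = peak;                   // All samples at the peak.
  const double rms_half = peak / std::sqrt(2.0);  // Every other sample zeroed.
  std::printf("crest factor: full %.2f dB, half-zeroed %.2f dB\n",
              20.0 * std::log10(peak / rms_full),
              20.0 * std::log10(peak / rms_half));
  return 0;
}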
for (int ch = 0; ch < num_channels; ++ch) { for (int i = 0; i < num_frames; ++i) { - audio_buffer.channels()[ch][i] = peak_ratio * 32767.f; + audio_buffer.channels()[ch][i] = peak_ratio * 32767.0f; } } for (int n = 0; n < num_calls - num_calls / 2; ++n) { @@ -112,6 +128,49 @@ void CallPreProcessAudioBuffer(int num_calls, } } +std::string GetAgcMinMicLevelExperimentFieldTrial(int enabled_value) { + RTC_DCHECK_GE(enabled_value, 0); + RTC_DCHECK_LE(enabled_value, 255); + char field_trial_buffer[64]; + rtc::SimpleStringBuilder builder(field_trial_buffer); + builder << "WebRTC-Audio-AgcMinMicLevelExperiment/Enabled-" << enabled_value + << "/"; + return builder.str(); +} + +// (Over)writes `samples_value` for the samples in `audio_buffer`. +// When `clipped_ratio`, a value in [0, 1], is greater than 0, the corresponding +// fraction of the frame is set to a full scale value to simulate clipping. +void WriteAudioBufferSamples(float samples_value, + float clipped_ratio, + AudioBuffer& audio_buffer) { + RTC_DCHECK_GE(samples_value, std::numeric_limits::min()); + RTC_DCHECK_LE(samples_value, std::numeric_limits::max()); + RTC_DCHECK_GE(clipped_ratio, 0.0f); + RTC_DCHECK_LE(clipped_ratio, 1.0f); + int num_channels = audio_buffer.num_channels(); + int num_samples = audio_buffer.num_frames(); + int num_clipping_samples = clipped_ratio * num_samples; + for (int ch = 0; ch < num_channels; ++ch) { + int i = 0; + for (; i < num_clipping_samples; ++i) { + audio_buffer.channels()[ch][i] = 32767.0f; + } + for (; i < num_samples; ++i) { + audio_buffer.channels()[ch][i] = samples_value; + } + } +} + +void CallPreProcessAndProcess(int num_calls, + const AudioBuffer& audio_buffer, + AgcManagerDirect& manager) { + for (int n = 0; n < num_calls; ++n) { + manager.AnalyzePreProcess(&audio_buffer); + manager.Process(&audio_buffer); + } +} + } // namespace class AgcManagerDirectTest : public ::testing::Test { @@ -121,11 +180,16 @@ class AgcManagerDirectTest : public ::testing::Test { manager_(agc_, kInitialVolume, kClippedMin, - kSampleRateHz, kClippedLevelStep, kClippedRatioThreshold, kClippedWaitFrames, ClippingPredictorConfig()), + audio_buffer(kSampleRateHz, + kNumChannels, + kSampleRateHz, + kNumChannels, + kSampleRateHz, + kNumChannels), audio(kNumChannels), audio_data(kNumChannels * kSamplesPerChannel, 0.f) { ExpectInitialize(); @@ -134,6 +198,8 @@ class AgcManagerDirectTest : public ::testing::Test { for (size_t ch = 0; ch < kNumChannels; ++ch) { audio[ch] = &audio_data[ch * kSamplesPerChannel]; } + WriteAudioBufferSamples(/*samples_value=*/0.0f, /*clipped_ratio=*/0.0f, + audio_buffer); } void FirstProcess() { @@ -161,8 +227,8 @@ class AgcManagerDirectTest : public ::testing::Test { void CallProcess(int num_calls) { for (int i = 0; i < num_calls; ++i) { - EXPECT_CALL(*agc_, Process(_, _, _)).WillOnce(Return()); - manager_.Process(nullptr); + EXPECT_CALL(*agc_, Process(_)).WillOnce(Return()); + manager_.Process(&audio_buffer); absl::optional new_digital_gain = manager_.GetDigitalComressionGain(); if (new_digital_gain) { @@ -172,12 +238,13 @@ class AgcManagerDirectTest : public ::testing::Test { } void CallPreProc(int num_calls, float clipped_ratio) { - RTC_DCHECK_GE(1.f, clipped_ratio); + RTC_DCHECK_GE(clipped_ratio, 0.0f); + RTC_DCHECK_LE(clipped_ratio, 1.0f); const int num_clipped = kSamplesPerChannel * clipped_ratio; std::fill(audio_data.begin(), audio_data.end(), 0.f); for (size_t ch = 0; ch < kNumChannels; ++ch) { for (int k = 0; k < num_clipped; ++k) { - audio[ch][k] = 32767.f; + audio[ch][k] = 32767.0f; } 
     }
   }
   for (int i = 0; i < num_calls; ++i) {
@@ -209,6 +276,7 @@ class AgcManagerDirectTest : public ::testing::Test {
   MockAgc* agc_;
   MockGainControl gctrl_;
   AgcManagerDirect manager_;
+  AudioBuffer audio_buffer;
   std::vector<float*> audio;
   std::vector<float> audio_data;
 };
@@ -452,7 +520,7 @@ TEST_F(AgcManagerDirectTest, CompressorReachesMinimum) {
 TEST_F(AgcManagerDirectTest, NoActionWhileMuted) {
   manager_.HandleCaptureOutputUsedChange(false);
-  manager_.Process(nullptr);
+  manager_.Process(&audio_buffer);
   absl::optional<int> new_digital_gain = manager_.GetDigitalComressionGain();
   if (new_digital_gain) {
     gctrl_.set_compression_gain_db(*new_digital_gain);
@@ -852,27 +920,121 @@ TEST(AgcManagerDirectStandaloneTest, AgcMinMicLevelExperimentOutOfRangeBelow) {
 // start volume is larger than the min level and should therefore not be
 // changed.
 TEST(AgcManagerDirectStandaloneTest, AgcMinMicLevelExperimentEnabled50) {
+  constexpr int kMinMicLevelOverride = 50;
   test::ScopedFieldTrials field_trial(
-      "WebRTC-Audio-AgcMinMicLevelExperiment/Enabled-50/");
+      GetAgcMinMicLevelExperimentFieldTrial(kMinMicLevelOverride));
   std::unique_ptr<AgcManagerDirect> manager =
       CreateAgcManagerDirect(kInitialVolume, kClippedLevelStep,
                              kClippedRatioThreshold, kClippedWaitFrames);
-  EXPECT_EQ(manager->channel_agcs_[0]->min_mic_level(), 50);
+  EXPECT_EQ(manager->channel_agcs_[0]->min_mic_level(), kMinMicLevelOverride);
   EXPECT_EQ(manager->channel_agcs_[0]->startup_min_level(), kInitialVolume);
 }

-// Uses experiment to reduce the default minimum microphone level, start at a
-// lower level and ensure that the startup level is increased to the min level
-// set by the experiment.
+// Checks that, when the "WebRTC-Audio-AgcMinMicLevelExperiment" field trial is
+// specified with a valid value, the mic level never gets lowered beyond the
+// override value in the presence of clipping.
 TEST(AgcManagerDirectStandaloneTest,
-     AgcMinMicLevelExperimentEnabledAboveStartupLevel) {
-  test::ScopedFieldTrials field_trial(
-      "WebRTC-Audio-AgcMinMicLevelExperiment/Enabled-50/");
-  std::unique_ptr<AgcManagerDirect> manager =
-      CreateAgcManagerDirect(/*startup_min_level=*/30, kClippedLevelStep,
-                             kClippedRatioThreshold, kClippedWaitFrames);
-  EXPECT_EQ(manager->channel_agcs_[0]->min_mic_level(), 50);
-  EXPECT_EQ(manager->channel_agcs_[0]->startup_min_level(), 50);
+     AgcMinMicLevelExperimentCheckMinLevelWithClipping) {
+  constexpr int kMinMicLevelOverride = 250;
+
+  // Create and initialize two AGCs by specifying and leaving unspecified the
+  // relevant field trial.
+  const auto factory = []() {
+    std::unique_ptr<AgcManagerDirect> manager =
+        CreateAgcManagerDirect(kInitialVolume, kClippedLevelStep,
+                               kClippedRatioThreshold, kClippedWaitFrames);
+    manager->Initialize();
+    manager->set_stream_analog_level(kInitialVolume);
+    return manager;
+  };
+  std::unique_ptr<AgcManagerDirect> manager = factory();
+  std::unique_ptr<AgcManagerDirect> manager_with_override;
+  {
+    test::ScopedFieldTrials field_trial(
+        GetAgcMinMicLevelExperimentFieldTrial(kMinMicLevelOverride));
+    manager_with_override = factory();
+  }
+
+  // Create a test input signal which contains 80% of clipped samples.
+  AudioBuffer audio_buffer(kSampleRateHz, 1, kSampleRateHz, 1, kSampleRateHz,
+                           1);
+  WriteAudioBufferSamples(/*samples_value=*/4000.0f, /*clipped_ratio=*/0.8f,
+                          audio_buffer);
+
+  // Simulate 4 seconds of clipping; it is expected to trigger a downward
+  // adjustment of the analog gain.
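// The "4 seconds" in the comment above follows from the module's 10 ms
// framing: one AnalyzePreProcess()/Process() pair consumes one frame, so 400
// calls span 4000 ms. Spelled out as a standalone check:
constexpr int kFrameDurationMs = 10;
constexpr int kNumSimulatedCalls = 400;
static_assert(kNumSimulatedCalls * kFrameDurationMs == 4000,
              "400 ten-millisecond frames cover four seconds");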
+  CallPreProcessAndProcess(/*num_calls=*/400, audio_buffer, *manager);
+  CallPreProcessAndProcess(/*num_calls=*/400, audio_buffer,
+                           *manager_with_override);
+
+  // Make sure that an adaptation occurred.
+  ASSERT_GT(manager->stream_analog_level(), 0);
+
+  // Check that the test signal triggers a larger downward adaptation for
+  // `manager`, which is allowed to reach a lower gain.
+  EXPECT_GT(manager_with_override->stream_analog_level(),
+            manager->stream_analog_level());
+  // Check that the gain selected by `manager_with_override` equals the minimum
+  // value overridden via field trial.
+  EXPECT_EQ(manager_with_override->stream_analog_level(), kMinMicLevelOverride);
+}
+
+// Checks that, when the "WebRTC-Audio-AgcMinMicLevelExperiment" field trial is
+// specified with a value lower than the `clipped_level_min`, the behavior of
+// the analog gain controller is the same as that obtained when the field trial
+// is not specified.
+TEST(AgcManagerDirectStandaloneTest,
+     AgcMinMicLevelExperimentCompareMicLevelWithClipping) {
+  // Create and initialize two AGCs by specifying and leaving unspecified the
+  // relevant field trial.
+  const auto factory = []() {
+    // Use a large clipped level step to more quickly decrease the analog gain
+    // with clipping.
+    auto controller = std::make_unique<AgcManagerDirect>(
+        /*num_capture_channels=*/1, kInitialVolume,
+        kDefaultAnalogConfig.clipped_level_min,
+        /*disable_digital_adaptive=*/true, /*clipped_level_step=*/64,
+        kClippedRatioThreshold, kClippedWaitFrames,
+        kDefaultAnalogConfig.clipping_predictor);
+    controller->Initialize();
+    controller->set_stream_analog_level(kInitialVolume);
+    return controller;
+  };
+  std::unique_ptr<AgcManagerDirect> manager = factory();
+  std::unique_ptr<AgcManagerDirect> manager_with_override;
+  {
+    constexpr int kMinMicLevelOverride = 20;
+    static_assert(
+        kDefaultAnalogConfig.clipped_level_min >= kMinMicLevelOverride,
+        "Use a lower override value.");
+    test::ScopedFieldTrials field_trial(
+        GetAgcMinMicLevelExperimentFieldTrial(kMinMicLevelOverride));
+    manager_with_override = factory();
+  }
+
+  // Create a test input signal which contains 80% of clipped samples.
+  AudioBuffer audio_buffer(kSampleRateHz, 1, kSampleRateHz, 1, kSampleRateHz,
+                           1);
+  WriteAudioBufferSamples(/*samples_value=*/4000.0f, /*clipped_ratio=*/0.8f,
+                          audio_buffer);
+
+  // Simulate 4 seconds of clipping; it is expected to trigger a downward
+  // adjustment of the analog gain.
+  CallPreProcessAndProcess(/*num_calls=*/400, audio_buffer, *manager);
+  CallPreProcessAndProcess(/*num_calls=*/400, audio_buffer,
+                           *manager_with_override);
+
+  // Make sure that an adaptation occurred.
+  ASSERT_GT(manager->stream_analog_level(), 0);
+
+  // Check that the selected analog gain is the same for both controllers and
+  // that it equals the minimum level reached when clipping is handled. That is
+  // expected because the minimum microphone level override is less than the
+  // minimum level used when clipping is detected.
+  EXPECT_EQ(manager->stream_analog_level(),
+            manager_with_override->stream_analog_level());
+  EXPECT_EQ(manager_with_override->stream_analog_level(),
+            kDefaultAnalogConfig.clipped_level_min);
+}

 // TODO(bugs.webrtc.org/12774): Test the behavior of `clipped_level_step`.
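// The override semantics exercised by the two tests above reduce to a floor
// applied after per-channel aggregation, mirroring the std::max() added to
// AggregateChannelLevels(); this standalone helper is illustrative, not the
// production code.
#include <algorithm>

#include "absl/types/optional.h"

int ApplyMinMicLevelOverride(int aggregated_level,
                             absl::optional<int> min_mic_level_override) {
  if (min_mic_level_override.has_value()) {
    // Clipping handling may drive the level down, but never below the
    // field-trial override.
    return std::max(aggregated_level, *min_mic_level_override);
  }
  return aggregated_level;
}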
@@ -931,17 +1093,20 @@ TEST(AgcManagerDirectStandaloneTest,
 TEST(AgcManagerDirectStandaloneTest,
      DisableClippingPredictorDoesNotLowerVolume) {
+  AudioBuffer audio_buffer(kSampleRateHz, kNumChannels, kSampleRateHz,
+                           kNumChannels, kSampleRateHz, kNumChannels);
+  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
   constexpr ClippingPredictorConfig kConfig{/*enabled=*/false};
   AgcManagerDirect manager(new ::testing::NiceMock<MockAgc>(), kInitialVolume,
-                           kClippedMin, kSampleRateHz, kClippedLevelStep,
+                           kClippedMin, kClippedLevelStep,
                            kClippedRatioThreshold, kClippedWaitFrames, kConfig);
   manager.Initialize();
   manager.set_stream_analog_level(/*level=*/255);
   EXPECT_FALSE(manager.clipping_predictor_enabled());
   EXPECT_FALSE(manager.use_clipping_predictor_step());
   EXPECT_EQ(manager.stream_analog_level(), 255);
-  manager.Process(nullptr);
+  manager.Process(&audio_buffer);
   CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager);
   EXPECT_EQ(manager.stream_analog_level(), 255);
   CallPreProcessAudioBuffer(/*num_calls=*/300, /*peak_ratio=*/0.99f, manager);
@@ -951,48 +1116,183 @@
 }

 TEST(AgcManagerDirectStandaloneTest,
-     EnableClippingPredictorWithUnusedPredictedStepDoesNotLowerVolume) {
-  // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed.
-  ClippingPredictorConfig config;
-  config.enabled = true;
-  config.use_predicted_step = false;
-  AgcManagerDirect manager(new ::testing::NiceMock<MockAgc>(), kInitialVolume,
-                           kClippedMin, kSampleRateHz, kClippedLevelStep,
-                           kClippedRatioThreshold, kClippedWaitFrames, config);
-  manager.Initialize();
-  manager.set_stream_analog_level(/*level=*/255);
-  EXPECT_TRUE(manager.clipping_predictor_enabled());
-  EXPECT_FALSE(manager.use_clipping_predictor_step());
-  EXPECT_EQ(manager.stream_analog_level(), 255);
-  manager.Process(nullptr);
-  CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager);
-  EXPECT_EQ(manager.stream_analog_level(), 255);
-  CallPreProcessAudioBuffer(/*num_calls=*/300, /*peak_ratio=*/0.99f, manager);
-  EXPECT_EQ(manager.stream_analog_level(), 255);
-  CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager);
-  EXPECT_EQ(manager.stream_analog_level(), 255);
+     UsedClippingPredictionsProduceLowerAnalogLevels) {
+  AudioBuffer audio_buffer(kSampleRateHz, kNumChannels, kSampleRateHz,
+                           kNumChannels, kSampleRateHz, kNumChannels);
+
+  // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
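// For reference, the C++20 form the TODO above anticipates (illustrative
// stand-in struct; blocked in the real config by bugs.webrtc.org/12874):
struct ConfigSketch {
  bool enabled = false;
  bool use_predicted_step = false;
};
constexpr ConfigSketch kWithPrediction{.enabled = true,
                                       .use_predicted_step = true};
static_assert(kWithPrediction.enabled && kWithPrediction.use_predicted_step,
              "designated initializers set both fields in one expression");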
+ ClippingPredictorConfig config_with_prediction; + config_with_prediction.enabled = true; + config_with_prediction.use_predicted_step = true; + AgcManagerDirect manager_with_prediction( + new ::testing::NiceMock(), kInitialVolume, kClippedMin, + kClippedLevelStep, kClippedRatioThreshold, kClippedWaitFrames, + config_with_prediction); + ClippingPredictorConfig config_without_prediction; + config_without_prediction.enabled = false; + AgcManagerDirect manager_without_prediction( + new ::testing::NiceMock(), kInitialVolume, kClippedMin, + kClippedLevelStep, kClippedRatioThreshold, kClippedWaitFrames, + config_without_prediction); + manager_with_prediction.Initialize(); + manager_without_prediction.Initialize(); + constexpr int kInitialLevel = 255; + constexpr float kClippingPeakRatio = 1.0f; + constexpr float kCloseToClippingPeakRatio = 0.99f; + constexpr float kZeroPeakRatio = 0.0f; + manager_with_prediction.set_stream_analog_level(kInitialLevel); + manager_without_prediction.set_stream_analog_level(kInitialLevel); + manager_with_prediction.Process(&audio_buffer); + manager_without_prediction.Process(&audio_buffer); + EXPECT_TRUE(manager_with_prediction.clipping_predictor_enabled()); + EXPECT_FALSE(manager_without_prediction.clipping_predictor_enabled()); + EXPECT_TRUE(manager_with_prediction.use_clipping_predictor_step()); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), kInitialLevel); + EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel); + // Expect a change in the analog level when the prediction step is used. + CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + kInitialLevel - kClippedLevelStep); + EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel); + // Expect no change during waiting. + CallPreProcessAudioBuffer(kClippedWaitFrames, kCloseToClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(kClippedWaitFrames, kCloseToClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + kInitialLevel - kClippedLevelStep); + EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel); + // Expect a change when the prediction step is used. + CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + kInitialLevel - 2 * kClippedLevelStep); + EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel); + // Expect no change when clipping is not detected or predicted. + CallPreProcessAudioBuffer(2 * kClippedWaitFrames, kZeroPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(2 * kClippedWaitFrames, kZeroPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + kInitialLevel - 2 * kClippedLevelStep); + EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel); + // Expect a change for clipping frames. 
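// The level arithmetic asserted throughout this test, factored into a sketch:
// each handled clipping event lowers the analog level by one step.
// Hypothetical helper; it omits the clamp at the configured minimum level
// that HandleClipping() also applies.
int ExpectedLevelAfterClippingEvents(int initial_level,
                                     int step,
                                     int num_handled_events) {
  return initial_level - num_handled_events * step;
}
// E.g. an initial level of 255 with a step of 15 and two handled events
// yields 225.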
+ CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + kInitialLevel - 3 * kClippedLevelStep); + EXPECT_EQ(manager_without_prediction.stream_analog_level(), + kInitialLevel - kClippedLevelStep); + // Expect no change during waiting. + CallPreProcessAudioBuffer(kClippedWaitFrames, kClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(kClippedWaitFrames, kClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + kInitialLevel - 3 * kClippedLevelStep); + EXPECT_EQ(manager_without_prediction.stream_analog_level(), + kInitialLevel - kClippedLevelStep); + // Expect a change for clipping frames. + CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + kInitialLevel - 4 * kClippedLevelStep); + EXPECT_EQ(manager_without_prediction.stream_analog_level(), + kInitialLevel - 2 * kClippedLevelStep); } -TEST(AgcManagerDirectStandaloneTest, EnableClippingPredictorLowersVolume) { - // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed. - ClippingPredictorConfig config; - config.enabled = true; - config.use_predicted_step = true; - AgcManagerDirect manager(new ::testing::NiceMock(), kInitialVolume, - kClippedMin, kSampleRateHz, kClippedLevelStep, - kClippedRatioThreshold, kClippedWaitFrames, config); - manager.Initialize(); - manager.set_stream_analog_level(/*level=*/255); - EXPECT_TRUE(manager.clipping_predictor_enabled()); - EXPECT_TRUE(manager.use_clipping_predictor_step()); - EXPECT_EQ(manager.stream_analog_level(), 255); - manager.Process(nullptr); - CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager); - EXPECT_EQ(manager.stream_analog_level(), 240); - CallPreProcessAudioBuffer(/*num_calls=*/300, /*peak_ratio=*/0.99f, manager); - EXPECT_EQ(manager.stream_analog_level(), 240); - CallPreProcessAudioBuffer(/*num_calls=*/10, /*peak_ratio=*/0.99f, manager); - EXPECT_EQ(manager.stream_analog_level(), 225); +TEST(AgcManagerDirectStandaloneTest, + UnusedClippingPredictionsProduceEqualAnalogLevels) { + AudioBuffer audio_buffer(kSampleRateHz, kNumChannels, kSampleRateHz, + kNumChannels, kSampleRateHz, kNumChannels); + + // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed. 
+ ClippingPredictorConfig config_with_prediction; + config_with_prediction.enabled = true; + config_with_prediction.use_predicted_step = false; + AgcManagerDirect manager_with_prediction( + new ::testing::NiceMock(), kInitialVolume, kClippedMin, + kClippedLevelStep, kClippedRatioThreshold, kClippedWaitFrames, + config_with_prediction); + ClippingPredictorConfig config_without_prediction; + config_without_prediction.enabled = false; + AgcManagerDirect manager_without_prediction( + new ::testing::NiceMock(), kInitialVolume, kClippedMin, + kClippedLevelStep, kClippedRatioThreshold, kClippedWaitFrames, + config_without_prediction); + constexpr int kInitialLevel = 255; + constexpr float kClippingPeakRatio = 1.0f; + constexpr float kCloseToClippingPeakRatio = 0.99f; + constexpr float kZeroPeakRatio = 0.0f; + manager_with_prediction.Initialize(); + manager_without_prediction.Initialize(); + manager_with_prediction.set_stream_analog_level(kInitialLevel); + manager_without_prediction.set_stream_analog_level(kInitialLevel); + manager_with_prediction.Process(&audio_buffer); + manager_without_prediction.Process(&audio_buffer); + EXPECT_TRUE(manager_with_prediction.clipping_predictor_enabled()); + EXPECT_FALSE(manager_without_prediction.clipping_predictor_enabled()); + EXPECT_FALSE(manager_with_prediction.use_clipping_predictor_step()); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), kInitialLevel); + EXPECT_EQ(manager_without_prediction.stream_analog_level(), kInitialLevel); + // Expect no change in the analog level for non-clipping frames. + CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + manager_without_prediction.stream_analog_level()); + // Expect no change for non-clipping frames. + CallPreProcessAudioBuffer(kClippedWaitFrames, kCloseToClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(kClippedWaitFrames, kCloseToClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + manager_without_prediction.stream_analog_level()); + // Expect no change for non-clipping frames. + CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(/*num_calls=*/10, kCloseToClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + manager_without_prediction.stream_analog_level()); + // Expect no change when clipping is not detected or predicted. + CallPreProcessAudioBuffer(2 * kClippedWaitFrames, kZeroPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(2 * kClippedWaitFrames, kZeroPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + manager_without_prediction.stream_analog_level()); + // Expect a change for clipping frames. + CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + manager_without_prediction.stream_analog_level()); + // Expect no change during waiting. 
+ CallPreProcessAudioBuffer(kClippedWaitFrames, kClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(kClippedWaitFrames, kClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + manager_without_prediction.stream_analog_level()); + // Expect a change for clipping frames. + CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio, + manager_with_prediction); + CallPreProcessAudioBuffer(/*num_calls=*/1, kClippingPeakRatio, + manager_without_prediction); + EXPECT_EQ(manager_with_prediction.stream_analog_level(), + manager_without_prediction.stream_analog_level()); } } // namespace webrtc diff --git a/modules/audio_processing/agc/analog_gain_stats_reporter.cc b/modules/audio_processing/agc/analog_gain_stats_reporter.cc new file mode 100644 index 0000000000..0d8753a7c8 --- /dev/null +++ b/modules/audio_processing/agc/analog_gain_stats_reporter.cc @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/agc/analog_gain_stats_reporter.h" + +#include + +#include "rtc_base/logging.h" +#include "rtc_base/numerics/safe_minmax.h" +#include "system_wrappers/include/metrics.h" + +namespace webrtc { +namespace { + +constexpr int kFramesIn60Seconds = 6000; +constexpr int kMinGain = 0; +constexpr int kMaxGain = 255; +constexpr int kMaxUpdate = kMaxGain - kMinGain; + +float ComputeAverageUpdate(int sum_updates, int num_updates) { + RTC_DCHECK_GE(sum_updates, 0); + RTC_DCHECK_LE(sum_updates, kMaxUpdate * kFramesIn60Seconds); + RTC_DCHECK_GE(num_updates, 0); + RTC_DCHECK_LE(num_updates, kFramesIn60Seconds); + if (num_updates == 0) { + return 0.0f; + } + return std::round(static_cast(sum_updates) / + static_cast(num_updates)); +} +} // namespace + +AnalogGainStatsReporter::AnalogGainStatsReporter() = default; + +AnalogGainStatsReporter::~AnalogGainStatsReporter() = default; + +void AnalogGainStatsReporter::UpdateStatistics(int analog_mic_level) { + RTC_DCHECK_GE(analog_mic_level, kMinGain); + RTC_DCHECK_LE(analog_mic_level, kMaxGain); + if (previous_analog_mic_level_.has_value() && + analog_mic_level != previous_analog_mic_level_.value()) { + const int level_change = + analog_mic_level - previous_analog_mic_level_.value(); + if (level_change < 0) { + ++level_update_stats_.num_decreases; + level_update_stats_.sum_decreases -= level_change; + } else { + ++level_update_stats_.num_increases; + level_update_stats_.sum_increases += level_change; + } + } + // Periodically log analog gain change metrics. 
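// Condensed sketch of the bookkeeping above: each level change is binned as
// an increase or a decrease so the two directions can be rated separately
// (names shortened for illustration; not the production class).
struct UpdateStatsSketch {
  int num_decreases = 0;
  int num_increases = 0;
  int sum_decreases = 0;
  int sum_increases = 0;
};

void Accumulate(int previous_level, int level, UpdateStatsSketch& stats) {
  const int change = level - previous_level;
  if (change < 0) {
    ++stats.num_decreases;
    stats.sum_decreases -= change;  // The magnitude is stored, hence the minus.
  } else if (change > 0) {
    ++stats.num_increases;
    stats.sum_increases += change;
  }  // Unchanged levels update neither bin.
}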
+ if (++log_level_update_stats_counter_ >= kFramesIn60Seconds) { + LogLevelUpdateStats(); + level_update_stats_ = {}; + log_level_update_stats_counter_ = 0; + } + previous_analog_mic_level_ = analog_mic_level; +} + +void AnalogGainStatsReporter::LogLevelUpdateStats() const { + const float average_decrease = ComputeAverageUpdate( + level_update_stats_.sum_decreases, level_update_stats_.num_decreases); + const float average_increase = ComputeAverageUpdate( + level_update_stats_.sum_increases, level_update_stats_.num_increases); + const int num_updates = + level_update_stats_.num_decreases + level_update_stats_.num_increases; + const float average_update = ComputeAverageUpdate( + level_update_stats_.sum_decreases + level_update_stats_.sum_increases, + num_updates); + RTC_DLOG(LS_INFO) << "Analog gain update rate: " + << "num_updates=" << num_updates + << ", num_decreases=" << level_update_stats_.num_decreases + << ", num_increases=" << level_update_stats_.num_increases; + RTC_DLOG(LS_INFO) << "Analog gain update average: " + << "average_update=" << average_update + << ", average_decrease=" << average_decrease + << ", average_increase=" << average_increase; + RTC_HISTOGRAM_COUNTS_LINEAR( + /*name=*/"WebRTC.Audio.ApmAnalogGainDecreaseRate", + /*sample=*/level_update_stats_.num_decreases, + /*min=*/1, + /*max=*/kFramesIn60Seconds, + /*bucket_count=*/50); + if (level_update_stats_.num_decreases > 0) { + RTC_HISTOGRAM_COUNTS_LINEAR( + /*name=*/"WebRTC.Audio.ApmAnalogGainDecreaseAverage", + /*sample=*/average_decrease, + /*min=*/1, + /*max=*/kMaxUpdate, + /*bucket_count=*/50); + } + RTC_HISTOGRAM_COUNTS_LINEAR( + /*name=*/"WebRTC.Audio.ApmAnalogGainIncreaseRate", + /*sample=*/level_update_stats_.num_increases, + /*min=*/1, + /*max=*/kFramesIn60Seconds, + /*bucket_count=*/50); + if (level_update_stats_.num_increases > 0) { + RTC_HISTOGRAM_COUNTS_LINEAR( + /*name=*/"WebRTC.Audio.ApmAnalogGainIncreaseAverage", + /*sample=*/average_increase, + /*min=*/1, + /*max=*/kMaxUpdate, + /*bucket_count=*/50); + } + RTC_HISTOGRAM_COUNTS_LINEAR( + /*name=*/"WebRTC.Audio.ApmAnalogGainUpdateRate", + /*sample=*/num_updates, + /*min=*/1, + /*max=*/kFramesIn60Seconds, + /*bucket_count=*/50); + if (num_updates > 0) { + RTC_HISTOGRAM_COUNTS_LINEAR( + /*name=*/"WebRTC.Audio.ApmAnalogGainUpdateAverage", + /*sample=*/average_update, + /*min=*/1, + /*max=*/kMaxUpdate, + /*bucket_count=*/50); + } +} + +} // namespace webrtc diff --git a/modules/audio_processing/agc/analog_gain_stats_reporter.h b/modules/audio_processing/agc/analog_gain_stats_reporter.h new file mode 100644 index 0000000000..c9442e8a43 --- /dev/null +++ b/modules/audio_processing/agc/analog_gain_stats_reporter.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_AUDIO_PROCESSING_AGC_ANALOG_GAIN_STATS_REPORTER_H_ +#define MODULES_AUDIO_PROCESSING_AGC_ANALOG_GAIN_STATS_REPORTER_H_ + +#include "absl/types/optional.h" +#include "rtc_base/gtest_prod_util.h" + +namespace webrtc { + +// Analog gain statistics calculator. Computes aggregate stats based on the +// framewise mic levels processed in `UpdateStatistics()`. Periodically logs the +// statistics into a histogram. 
+class AnalogGainStatsReporter {
+ public:
+  AnalogGainStatsReporter();
+  AnalogGainStatsReporter(const AnalogGainStatsReporter&) = delete;
+  AnalogGainStatsReporter& operator=(const AnalogGainStatsReporter&) = delete;
+  ~AnalogGainStatsReporter();
+
+  // Updates the stats based on the `analog_mic_level`. Periodically logs the
+  // stats into a histogram.
+  void UpdateStatistics(int analog_mic_level);
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(AnalogGainStatsReporterTest,
+                           CheckLevelUpdateStatsForEmptyStats);
+  FRIEND_TEST_ALL_PREFIXES(AnalogGainStatsReporterTest,
+                           CheckLevelUpdateStatsAfterNoGainChange);
+  FRIEND_TEST_ALL_PREFIXES(AnalogGainStatsReporterTest,
+                           CheckLevelUpdateStatsAfterGainIncrease);
+  FRIEND_TEST_ALL_PREFIXES(AnalogGainStatsReporterTest,
+                           CheckLevelUpdateStatsAfterGainDecrease);
+  FRIEND_TEST_ALL_PREFIXES(AnalogGainStatsReporterTest,
+                           CheckLevelUpdateStatsAfterReset);
+
+  // Stores analog gain update stats to enable calculation of update rate and
+  // average update separately for gain increases and decreases.
+  struct LevelUpdateStats {
+    int num_decreases = 0;
+    int num_increases = 0;
+    int sum_decreases = 0;
+    int sum_increases = 0;
+  } level_update_stats_;
+
+  // Returns a copy of the stored statistics. Use only for testing.
+  const LevelUpdateStats level_update_stats() const {
+    return level_update_stats_;
+  }
+
+  // Computes aggregate stats and logs them into a histogram.
+  void LogLevelUpdateStats() const;
+
+  int log_level_update_stats_counter_ = 0;
+  absl::optional<int> previous_analog_mic_level_ = absl::nullopt;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_ANALOG_GAIN_STATS_REPORTER_H_
diff --git a/modules/audio_processing/agc/analog_gain_stats_reporter_unittest.cc b/modules/audio_processing/agc/analog_gain_stats_reporter_unittest.cc
new file mode 100644
index 0000000000..cab52876f6
--- /dev/null
+++ b/modules/audio_processing/agc/analog_gain_stats_reporter_unittest.cc
@@ -0,0 +1,161 @@
+/*
+ *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/analog_gain_stats_reporter.h"
+
+#include "system_wrappers/include/metrics.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace {
+
+constexpr int kFramesIn60Seconds = 6000;
+
+TEST(AnalogGainStatsReporterTest, CheckLogLevelUpdateStatsEmpty) {
+  AnalogGainStatsReporter stats_reporter;
+  constexpr int kMicLevel = 10;
+  stats_reporter.UpdateStatistics(kMicLevel);
+  // Update almost until the periodic logging and reset.
+ for (int i = 0; i < kFramesIn60Seconds - 2; i += 2) { + stats_reporter.UpdateStatistics(kMicLevel + 2); + stats_reporter.UpdateStatistics(kMicLevel); + } + EXPECT_METRIC_THAT(metrics::Samples("WebRTC.Audio.ApmAnalogGainUpdateRate"), + ::testing::ElementsAre()); + EXPECT_METRIC_THAT(metrics::Samples("WebRTC.Audio.ApmAnalogGainDecreaseRate"), + ::testing::ElementsAre()); + EXPECT_METRIC_THAT(metrics::Samples("WebRTC.Audio.ApmAnalogGainIncreaseRate"), + ::testing::ElementsAre()); + EXPECT_METRIC_THAT( + metrics::Samples("WebRTC.Audio.ApmAnalogGainUpdateAverage"), + ::testing::ElementsAre()); + EXPECT_METRIC_THAT( + metrics::Samples("WebRTC.Audio.ApmAnalogGainDecreaseAverage"), + ::testing::ElementsAre()); + EXPECT_METRIC_THAT( + metrics::Samples("WebRTC.Audio.ApmAnalogGainIncreaseAverage"), + ::testing::ElementsAre()); +} + +TEST(AnalogGainStatsReporterTest, CheckLogLevelUpdateStatsNotEmpty) { + AnalogGainStatsReporter stats_reporter; + constexpr int kMicLevel = 10; + stats_reporter.UpdateStatistics(kMicLevel); + // Update until periodic logging. + for (int i = 0; i < kFramesIn60Seconds; i += 2) { + stats_reporter.UpdateStatistics(kMicLevel + 2); + stats_reporter.UpdateStatistics(kMicLevel); + } + // Update until periodic logging. + for (int i = 0; i < kFramesIn60Seconds; i += 2) { + stats_reporter.UpdateStatistics(kMicLevel + 3); + stats_reporter.UpdateStatistics(kMicLevel); + } + EXPECT_METRIC_THAT( + metrics::Samples("WebRTC.Audio.ApmAnalogGainUpdateRate"), + ::testing::ElementsAre(::testing::Pair(kFramesIn60Seconds - 1, 1), + ::testing::Pair(kFramesIn60Seconds, 1))); + EXPECT_METRIC_THAT( + metrics::Samples("WebRTC.Audio.ApmAnalogGainDecreaseRate"), + ::testing::ElementsAre(::testing::Pair(kFramesIn60Seconds / 2 - 1, 1), + ::testing::Pair(kFramesIn60Seconds / 2, 1))); + EXPECT_METRIC_THAT( + metrics::Samples("WebRTC.Audio.ApmAnalogGainIncreaseRate"), + ::testing::ElementsAre(::testing::Pair(kFramesIn60Seconds / 2, 2))); + EXPECT_METRIC_THAT( + metrics::Samples("WebRTC.Audio.ApmAnalogGainUpdateAverage"), + ::testing::ElementsAre(::testing::Pair(2, 1), ::testing::Pair(3, 1))); + EXPECT_METRIC_THAT( + metrics::Samples("WebRTC.Audio.ApmAnalogGainDecreaseAverage"), + ::testing::ElementsAre(::testing::Pair(2, 1), ::testing::Pair(3, 1))); + EXPECT_METRIC_THAT( + metrics::Samples("WebRTC.Audio.ApmAnalogGainIncreaseAverage"), + ::testing::ElementsAre(::testing::Pair(2, 1), ::testing::Pair(3, 1))); +} +} // namespace + +TEST(AnalogGainStatsReporterTest, CheckLevelUpdateStatsForEmptyStats) { + AnalogGainStatsReporter stats_reporter; + const auto& update_stats = stats_reporter.level_update_stats(); + EXPECT_EQ(update_stats.num_decreases, 0); + EXPECT_EQ(update_stats.sum_decreases, 0); + EXPECT_EQ(update_stats.num_increases, 0); + EXPECT_EQ(update_stats.sum_increases, 0); +} + +TEST(AnalogGainStatsReporterTest, CheckLevelUpdateStatsAfterNoGainChange) { + constexpr int kMicLevel = 10; + AnalogGainStatsReporter stats_reporter; + stats_reporter.UpdateStatistics(kMicLevel); + stats_reporter.UpdateStatistics(kMicLevel); + stats_reporter.UpdateStatistics(kMicLevel); + const auto& update_stats = stats_reporter.level_update_stats(); + EXPECT_EQ(update_stats.num_decreases, 0); + EXPECT_EQ(update_stats.sum_decreases, 0); + EXPECT_EQ(update_stats.num_increases, 0); + EXPECT_EQ(update_stats.sum_increases, 0); +} + +TEST(AnalogGainStatsReporterTest, CheckLevelUpdateStatsAfterGainIncrease) { + constexpr int kMicLevel = 10; + AnalogGainStatsReporter stats_reporter; + stats_reporter.UpdateStatistics(kMicLevel); + 
stats_reporter.UpdateStatistics(kMicLevel + 4); + stats_reporter.UpdateStatistics(kMicLevel + 5); + const auto& update_stats = stats_reporter.level_update_stats(); + EXPECT_EQ(update_stats.num_decreases, 0); + EXPECT_EQ(update_stats.sum_decreases, 0); + EXPECT_EQ(update_stats.num_increases, 2); + EXPECT_EQ(update_stats.sum_increases, 5); +} + +TEST(AnalogGainStatsReporterTest, CheckLevelUpdateStatsAfterGainDecrease) { + constexpr int kMicLevel = 10; + AnalogGainStatsReporter stats_reporter; + stats_reporter.UpdateStatistics(kMicLevel); + stats_reporter.UpdateStatistics(kMicLevel - 4); + stats_reporter.UpdateStatistics(kMicLevel - 5); + const auto& stats_update = stats_reporter.level_update_stats(); + EXPECT_EQ(stats_update.num_decreases, 2); + EXPECT_EQ(stats_update.sum_decreases, 5); + EXPECT_EQ(stats_update.num_increases, 0); + EXPECT_EQ(stats_update.sum_increases, 0); +} + +TEST(AnalogGainStatsReporterTest, CheckLevelUpdateStatsAfterReset) { + AnalogGainStatsReporter stats_reporter; + constexpr int kMicLevel = 10; + stats_reporter.UpdateStatistics(kMicLevel); + // Update until the periodic reset. + for (int i = 0; i < kFramesIn60Seconds - 2; i += 2) { + stats_reporter.UpdateStatistics(kMicLevel + 2); + stats_reporter.UpdateStatistics(kMicLevel); + } + const auto& stats_before_reset = stats_reporter.level_update_stats(); + EXPECT_EQ(stats_before_reset.num_decreases, kFramesIn60Seconds / 2 - 1); + EXPECT_EQ(stats_before_reset.sum_decreases, kFramesIn60Seconds - 2); + EXPECT_EQ(stats_before_reset.num_increases, kFramesIn60Seconds / 2 - 1); + EXPECT_EQ(stats_before_reset.sum_increases, kFramesIn60Seconds - 2); + stats_reporter.UpdateStatistics(kMicLevel + 2); + const auto& stats_during_reset = stats_reporter.level_update_stats(); + EXPECT_EQ(stats_during_reset.num_decreases, 0); + EXPECT_EQ(stats_during_reset.sum_decreases, 0); + EXPECT_EQ(stats_during_reset.num_increases, 0); + EXPECT_EQ(stats_during_reset.sum_increases, 0); + stats_reporter.UpdateStatistics(kMicLevel); + stats_reporter.UpdateStatistics(kMicLevel + 3); + const auto& stats_after_reset = stats_reporter.level_update_stats(); + EXPECT_EQ(stats_after_reset.num_decreases, 1); + EXPECT_EQ(stats_after_reset.sum_decreases, 2); + EXPECT_EQ(stats_after_reset.num_increases, 1); + EXPECT_EQ(stats_after_reset.sum_increases, 3); +} + +} // namespace webrtc diff --git a/modules/audio_processing/agc/clipping_predictor.cc b/modules/audio_processing/agc/clipping_predictor.cc index 982bbca2ee..58b3a2769c 100644 --- a/modules/audio_processing/agc/clipping_predictor.cc +++ b/modules/audio_processing/agc/clipping_predictor.cc @@ -377,7 +377,7 @@ std::unique_ptr CreateClippingPredictor( config.reference_window_delay, config.clipping_threshold, /*adaptive_step_estimation=*/false); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } } // namespace webrtc diff --git a/modules/audio_processing/agc/clipping_predictor_evaluator.cc b/modules/audio_processing/agc/clipping_predictor_evaluator.cc index 2a4ea922cf..ed7198d119 100644 --- a/modules/audio_processing/agc/clipping_predictor_evaluator.cc +++ b/modules/audio_processing/agc/clipping_predictor_evaluator.cc @@ -30,13 +30,13 @@ int OldestExpectedDetectionIndex(int size, int tail, int capacity) { ClippingPredictorEvaluator::ClippingPredictorEvaluator(int history_size) : history_size_(history_size), ring_buffer_capacity_(history_size + 1), - ring_buffer_(ring_buffer_capacity_), - true_positives_(0), - true_negatives_(0), - false_positives_(0), - false_negatives_(0) { + ring_buffer_(ring_buffer_capacity_) 
{
   RTC_DCHECK_GT(history_size_, 0);
   Reset();
+  counters_.true_positives = 0;
+  counters_.true_negatives = 0;
+  counters_.false_positives = 0;
+  counters_.false_negatives = 0;
 }

 ClippingPredictorEvaluator::~ClippingPredictorEvaluator() = default;
@@ -50,10 +50,6 @@ absl::optional<int> ClippingPredictorEvaluator::Observe(
   RTC_DCHECK_LT(ring_buffer_tail_, ring_buffer_capacity_);
   DecreaseTimesToLive();
-  if (clipping_predicted) {
-    // TODO(bugs.webrtc.org/12874): Use designated initializers one fixed.
-    Push(/*expected_detection=*/{/*ttl=*/history_size_, /*detected=*/false});
-  }
   // Clipping is expected if there are expected detections regardless of
   // whether all the expected detections have been previously matched - i.e.,
   // `ExpectedDetection::detected` is true.
@@ -64,7 +60,7 @@ absl::optional<int> ClippingPredictorEvaluator::Observe(
     prediction_interval = FindEarliestPredictionInterval();
     // Add a true positive for each unexpired expected detection.
     const int num_modified_items = MarkExpectedDetectionAsDetected();
-    true_positives_ += num_modified_items;
+    counters_.true_positives += num_modified_items;
     RTC_DCHECK(prediction_interval.has_value() || num_modified_items == 0);
     RTC_DCHECK(!prediction_interval.has_value() || num_modified_items > 0);
   } else if (clipping_expected && !clipping_detected) {
@@ -72,23 +68,37 @@
     // and that has never been matched before. Note that there is at most one
     // unmatched expired detection.
     if (HasExpiredUnmatchedExpectedDetection()) {
-      false_positives_++;
+      counters_.false_positives++;
     }
   } else if (!clipping_expected && clipping_detected) {
-    false_negatives_++;
+    counters_.false_negatives++;
   } else {
     RTC_DCHECK(!clipping_expected && !clipping_detected);
-    true_negatives_++;
+    counters_.true_negatives++;
+  }
+
+  if (clipping_predicted) {
+    // TODO(bugs.webrtc.org/12874): Use designated initializers once fixed.
+    Push(/*expected_detection=*/{/*ttl=*/history_size_, /*detected=*/false});
   }
+
   return prediction_interval;
 }

-void ClippingPredictorEvaluator::Reset() {
+void ClippingPredictorEvaluator::RemoveExpectations() {
   // Empty the ring buffer of expected detections.
   ring_buffer_tail_ = 0;
   ring_buffer_size_ = 0;
 }

+void ClippingPredictorEvaluator::Reset() {
+  counters_.true_positives = 0;
+  counters_.true_negatives = 0;
+  counters_.false_positives = 0;
+  counters_.false_negatives = 0;
+  RemoveExpectations();
+}
+
 // Cost: O(1).
 void ClippingPredictorEvaluator::Push(ExpectedDetection value) {
   ring_buffer_[ring_buffer_tail_] = value;
@@ -172,4 +182,33 @@ bool ClippingPredictorEvaluator::HasExpiredUnmatchedExpectedDetection() const {
          !ring_buffer_[oldest_index].detected;
 }

+absl::optional<ClippingPredictionMetrics> ComputeClippingPredictionMetrics(
+    const ClippingPredictionCounters& counters) {
+  RTC_DCHECK_GE(counters.true_positives, 0);
+  RTC_DCHECK_GE(counters.true_negatives, 0);
+  RTC_DCHECK_GE(counters.false_positives, 0);
+  RTC_DCHECK_GE(counters.false_negatives, 0);
+  if (counters.true_positives == 0) {
+    // Both precision and recall are zero in this case and hence the F1 score
+    // is undefined.
+    return absl::nullopt;
+  }
+  int precision_denominator =
+      counters.true_positives + counters.false_positives;
+  int recall_denominator = counters.true_positives + counters.false_negatives;
+  if (precision_denominator == 0 || recall_denominator == 0) {
+    // Both precision and recall must be defined.
+ return absl::nullopt; + } + ClippingPredictionMetrics metrics; + float true_positives = counters.true_positives; + metrics.precision = true_positives / precision_denominator; + metrics.recall = true_positives / recall_denominator; + float f1_score_denominator = metrics.precision + metrics.recall; + RTC_DCHECK_GT(f1_score_denominator, 0.0f); + metrics.f1_score = + 2 * metrics.precision * metrics.recall / f1_score_denominator; + return metrics; +} + } // namespace webrtc diff --git a/modules/audio_processing/agc/clipping_predictor_evaluator.h b/modules/audio_processing/agc/clipping_predictor_evaluator.h index e76f25d5e1..348f753493 100644 --- a/modules/audio_processing/agc/clipping_predictor_evaluator.h +++ b/modules/audio_processing/agc/clipping_predictor_evaluator.h @@ -17,13 +17,21 @@ namespace webrtc { +// Clipping prediction counters. +struct ClippingPredictionCounters { + int true_positives; // TP. + int true_negatives; // TN. + int false_positives; // FP. + int false_negatives; // FN. +}; + // Counts true/false positives/negatives while observing sequences of flag pairs // that indicate whether clipping has been detected and/or if clipping is // predicted. When a true positive is found, the time interval between // prediction and detection events is measured. -// From the time a prediction is observed and for a period equal to +// After a prediction is observed and for a period equal to // `history_size` calls to `Observe()`, one or more detections are expected. If -// the expectation is met, a true positives is added and the time interval +// the expectation is met, a true positive is added and the time interval // between the earliest prediction and the detection is recorded; otherwise, // when the deadline is reached, a false positive is added. Note that one // detection matches all the expected detections that have not expired - i.e., @@ -48,14 +56,13 @@ class ClippingPredictorEvaluator { absl::optional<int> Observe(bool clipping_detected, bool clipping_predicted); // Removes any expectation recently set after a call to `Observe()` having - // `clipping_predicted` set to true. + // `clipping_predicted` set to true. Counters won't be reset. + void RemoveExpectations(); + + // Resets counters and removes any expectation (see `RemoveExpectations()`). void Reset(); - // Metrics getters. - int true_positives() const { return true_positives_; } - int true_negatives() const { return true_negatives_; } - int false_positives() const { return false_positives_; } - int false_negatives() const { return false_negatives_; } + ClippingPredictionCounters counters() const { return counters_; } private: const int history_size_; @@ -90,13 +97,26 @@ class ClippingPredictorEvaluator { // equal to 0 (expired) and `detected` equal to false (unmatched). bool HasExpiredUnmatchedExpectedDetection() const; - // Metrics. - int true_positives_; - int true_negatives_; - int false_positives_; - int false_negatives_; + // Counters. + ClippingPredictionCounters counters_; }; +// Clipping prediction metrics derived from the clipping prediction counters. +struct ClippingPredictionMetrics { + // Precision (P) is defined as TP / (TP + FP). + float precision; + // Recall (R) is defined as TP / (TP + FN). + float recall; + // The F1 score is defined as 2 * P * R / (P + R). + float f1_score; +}; + +// Derives clipping prediction metrics from the true/false positives/negatives +// `counters`. Returns absl::nullopt if one or more of the metrics are not +// defined.
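+// A worked example with hypothetical counts: TP = 6, FP = 2 and FN = 6 give +// precision 6 / 8 = 0.75, recall 6 / 12 = 0.5 and F1 score +// 2 * 0.75 * 0.5 / (0.75 + 0.5) = 0.6.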
+absl::optional ComputeClippingPredictionMetrics( + const ClippingPredictionCounters& counters); + } // namespace webrtc #endif // MODULES_AUDIO_PROCESSING_AGC_CLIPPING_PREDICTOR_EVALUATOR_H_ diff --git a/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc b/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc index 1eb83eae61..b2d2797ca5 100644 --- a/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc +++ b/modules/audio_processing/agc/clipping_predictor_evaluator_unittest.cc @@ -25,6 +25,7 @@ namespace webrtc { namespace { using testing::Eq; +using testing::Field; using testing::Optional; constexpr bool kDetected = true; @@ -33,19 +34,23 @@ constexpr bool kNotDetected = false; constexpr bool kPredicted = true; constexpr bool kNotPredicted = false; -int SumTrueFalsePositivesNegatives( - const ClippingPredictorEvaluator& evaluator) { - return evaluator.true_positives() + evaluator.true_negatives() + - evaluator.false_positives() + evaluator.false_negatives(); +ClippingPredictionCounters operator-(const ClippingPredictionCounters& lhs, + const ClippingPredictionCounters& rhs) { + return { + lhs.true_positives - rhs.true_positives, + lhs.true_negatives - rhs.true_negatives, + lhs.false_positives - rhs.false_positives, + lhs.false_negatives - rhs.false_negatives, + }; } // Checks the metrics after init - i.e., no call to `Observe()`. -TEST(ClippingPredictorEvaluatorTest, Init) { +TEST(ClippingPredictionEvalTest, Init) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); - EXPECT_EQ(evaluator.true_positives(), 0); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + EXPECT_EQ(evaluator.counters().true_positives, 0); + EXPECT_EQ(evaluator.counters().true_negatives, 0); + EXPECT_EQ(evaluator.counters().false_positives, 0); + EXPECT_EQ(evaluator.counters().false_negatives, 0); } class ClippingPredictorEvaluatorParameterization @@ -66,21 +71,19 @@ TEST_P(ClippingPredictorEvaluatorParameterization, AtMostOneMetricChanges) { for (int i = 0; i < kNumCalls; ++i) { SCOPED_TRACE(i); // Read metrics before `Observe()` is called. - const int last_tp = evaluator.true_positives(); - const int last_tn = evaluator.true_negatives(); - const int last_fp = evaluator.false_positives(); - const int last_fn = evaluator.false_negatives(); + const auto pre = evaluator.counters(); // `Observe()` a random observation. bool clipping_detected = random_generator.Rand(); bool clipping_predicted = random_generator.Rand(); evaluator.Observe(clipping_detected, clipping_predicted); // Check that at most one metric has changed. + const auto post = evaluator.counters(); int num_changes = 0; - num_changes += last_tp == evaluator.true_positives() ? 0 : 1; - num_changes += last_tn == evaluator.true_negatives() ? 0 : 1; - num_changes += last_fp == evaluator.false_positives() ? 0 : 1; - num_changes += last_fn == evaluator.false_negatives() ? 0 : 1; + num_changes += pre.true_positives == post.true_positives ? 0 : 1; + num_changes += pre.true_negatives == post.true_negatives ? 0 : 1; + num_changes += pre.false_positives == post.false_positives ? 0 : 1; + num_changes += pre.false_negatives == post.false_negatives ? 0 : 1; EXPECT_GE(num_changes, 0); EXPECT_LE(num_changes, 1); } @@ -96,20 +99,18 @@ TEST_P(ClippingPredictorEvaluatorParameterization, MetricsAreWeaklyMonotonic) { for (int i = 0; i < kNumCalls; ++i) { SCOPED_TRACE(i); // Read metrics before `Observe()` is called. 
- const int last_tp = evaluator.true_positives(); - const int last_tn = evaluator.true_negatives(); - const int last_fp = evaluator.false_positives(); - const int last_fn = evaluator.false_negatives(); + const auto pre = evaluator.counters(); // `Observe()` a random observation. bool clipping_detected = random_generator.Rand(); bool clipping_predicted = random_generator.Rand(); evaluator.Observe(clipping_detected, clipping_predicted); // Check that metrics are weakly monotonic. - EXPECT_GE(evaluator.true_positives(), last_tp); - EXPECT_GE(evaluator.true_negatives(), last_tn); - EXPECT_GE(evaluator.false_positives(), last_fp); - EXPECT_GE(evaluator.false_negatives(), last_fn); + const auto post = evaluator.counters(); + EXPECT_GE(post.true_positives, pre.true_positives); + EXPECT_GE(post.true_negatives, pre.true_negatives); + EXPECT_GE(post.false_positives, pre.false_positives); + EXPECT_GE(post.false_negatives, pre.false_negatives); } } @@ -123,22 +124,20 @@ TEST_P(ClippingPredictorEvaluatorParameterization, BoundedMetricsGrowth) { for (int i = 0; i < kNumCalls; ++i) { SCOPED_TRACE(i); // Read metrics before `Observe()` is called. - const int last_tp = evaluator.true_positives(); - const int last_tn = evaluator.true_negatives(); - const int last_fp = evaluator.false_positives(); - const int last_fn = evaluator.false_negatives(); + const auto pre = evaluator.counters(); // `Observe()` a random observation. bool clipping_detected = random_generator.Rand(); bool clipping_predicted = random_generator.Rand(); evaluator.Observe(clipping_detected, clipping_predicted); + const auto diff = evaluator.counters() - pre; // Check that TPs grow by at most `history_size() + 1`. Such an upper bound // is reached when multiple predictions are matched by a single detection. - EXPECT_LE(evaluator.true_positives() - last_tp, history_size() + 1); - // Check that TNs, FPs and FNs grow by at most one. `max_growth`. - EXPECT_LE(evaluator.true_negatives() - last_tn, 1); - EXPECT_LE(evaluator.false_positives() - last_fp, 1); - EXPECT_LE(evaluator.false_negatives() - last_fn, 1); + EXPECT_LE(diff.true_positives, history_size() + 1); + // Check that TNs, FPs and FNs grow by at most one. + EXPECT_LE(diff.true_negatives, 1); + EXPECT_LE(diff.false_positives, 1); + EXPECT_LE(diff.false_negatives, 1); } } @@ -153,7 +152,7 @@ TEST_P(ClippingPredictorEvaluatorParameterization, for (int i = 0; i < kNumCalls; ++i) { SCOPED_TRACE(i); // Read true positives before `Observe()` is called. - const int last_tp = evaluator.true_positives(); + const int last_tp = evaluator.counters().true_positives; // `Observe()` a random observation. bool clipping_detected = random_generator.Rand(); bool clipping_predicted = random_generator.Rand(); @@ -162,7 +161,7 @@ TEST_P(ClippingPredictorEvaluatorParameterization, // Check that the prediction interval is returned when a true positive is // found. - if (evaluator.true_positives() == last_tp) { + if (evaluator.counters().true_positives == last_tp) { EXPECT_FALSE(prediction_interval.has_value()); } else { EXPECT_TRUE(prediction_interval.has_value()); @@ -171,198 +170,233 @@ TEST_P(ClippingPredictorEvaluatorParameterization, } INSTANTIATE_TEST_SUITE_P( - ClippingPredictorEvaluatorTest, + ClippingPredictionEvalTest, ClippingPredictorEvaluatorParameterization, ::testing::Combine(::testing::Values(4, 8, 15, 16, 23, 42), ::testing::Values(1, 10, 21))); -// Checks that, observing a detection and a prediction after init, produces a -// true positive. 
-TEST(ClippingPredictorEvaluatorTest, OneTruePositiveAfterInit) { +// Checks that, after initialization, when no detection is expected, +// observing no detection and no prediction produces a true negative. +TEST(ClippingPredictionEvalTest, TrueNegativeWithNoDetectNoPredictAfterInit) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.counters().true_positives, 0); + EXPECT_EQ(evaluator.counters().true_negatives, 1); + EXPECT_EQ(evaluator.counters().false_positives, 0); + EXPECT_EQ(evaluator.counters().false_negatives, 0); +} + +// Checks that, after initialization, when no detection is expected, +// observing no detection and a prediction produces a true negative. +TEST(ClippingPredictionEvalTest, TrueNegativeWithNoDetectPredictAfterInit) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); - evaluator.Observe(kDetected, kPredicted); - EXPECT_EQ(evaluator.true_positives(), 1); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + evaluator.Observe(kNotDetected, kPredicted); + EXPECT_EQ(evaluator.counters().true_positives, 0); + EXPECT_EQ(evaluator.counters().true_negatives, 1); + EXPECT_EQ(evaluator.counters().false_positives, 0); + EXPECT_EQ(evaluator.counters().false_negatives, 0); } -// Checks that, observing a detection but no prediction after init, produces a -// false negative. -TEST(ClippingPredictorEvaluatorTest, OneFalseNegativeAfterInit) { +// Checks that, after initialization, when no detection is expected, +// observing a detection and no prediction produces a false negative. +TEST(ClippingPredictionEvalTest, FalseNegativeWithDetectNoPredictAfterInit) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); + evaluator.Observe(kDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_negatives(), 1); + EXPECT_EQ(evaluator.counters().true_positives, 0); + EXPECT_EQ(evaluator.counters().true_negatives, 0); + EXPECT_EQ(evaluator.counters().false_positives, 0); + EXPECT_EQ(evaluator.counters().false_negatives, 1); +} + +// Checks that, after initialization, when no detection is expected, +// simultaneously observing a detection and a prediction produces a false +// negative. +TEST(ClippingPredictionEvalTest, FalseNegativeWithDetectPredictAfterInit) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); - EXPECT_EQ(evaluator.true_positives(), 0); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kDetected, kPredicted); + EXPECT_EQ(evaluator.counters().true_positives, 0); + EXPECT_EQ(evaluator.counters().true_negatives, 0); + EXPECT_EQ(evaluator.counters().false_positives, 0); + EXPECT_EQ(evaluator.counters().false_negatives, 1); } -// Checks that, observing no detection but a prediction after init, produces a -// false positive after expiration. -TEST(ClippingPredictorEvaluatorTest, OneFalsePositiveAfterInit) { +// Checks that, after removing existing expectations, observing no detection and +// no prediction produces a true negative. +TEST(ClippingPredictionEvalTest, + TrueNegativeWithNoDetectNoPredictAfterRemoveExpectations) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); + + // Set an expectation, then remove it.
evaluator.Observe(kNotDetected, kPredicted); - EXPECT_EQ(evaluator.false_positives(), 0); - evaluator.Observe(kNotDetected, kNotPredicted); - evaluator.Observe(kNotDetected, kNotPredicted); + evaluator.RemoveExpectations(); + const auto pre = evaluator.counters(); + evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_positives(), 1); + const auto diff = evaluator.counters() - pre; + EXPECT_EQ(diff.true_positives, 0); + EXPECT_EQ(diff.true_negatives, 1); + EXPECT_EQ(diff.false_positives, 0); + EXPECT_EQ(diff.false_negatives, 0); +} - EXPECT_EQ(evaluator.true_positives(), 0); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); +// Checks that, after removing existing expectations, observing no detection and +// a prediction produces a true negative. +TEST(ClippingPredictionEvalTest, + TrueNegativeWithNoDetectPredictAfterRemoveExpectations) { + ClippingPredictorEvaluator evaluator(/*history_size=*/3); + + // Set an expectation, then remove it. + evaluator.Observe(kNotDetected, kPredicted); + evaluator.RemoveExpectations(); + const auto pre = evaluator.counters(); + + evaluator.Observe(kNotDetected, kPredicted); + const auto diff = evaluator.counters() - pre; + EXPECT_EQ(diff.true_positives, 0); + EXPECT_EQ(diff.true_negatives, 1); + EXPECT_EQ(diff.false_positives, 0); + EXPECT_EQ(diff.false_negatives, 0); } -// Checks that, observing no detection and no prediction after init, produces a -// true negative. -TEST(ClippingPredictorEvaluatorTest, OneTrueNegativeAfterInit) { +// Checks that, after removing existing expectations, observing a detection and +// no prediction produces a false negative. +TEST(ClippingPredictionEvalTest, + FalseNegativeWithDetectNoPredictAfterRemoveExpectations) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); - evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.true_negatives(), 1); - EXPECT_EQ(evaluator.true_positives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + // Set an expectation, then remove it. + evaluator.Observe(kNotDetected, kPredicted); + evaluator.RemoveExpectations(); + const auto pre = evaluator.counters(); + + evaluator.Observe(kDetected, kNotPredicted); + const auto diff = evaluator.counters() - pre; + EXPECT_EQ(diff.true_positives, 0); + EXPECT_EQ(diff.true_negatives, 0); + EXPECT_EQ(diff.false_positives, 0); + EXPECT_EQ(diff.false_negatives, 1); } -// Checks that the evaluator detects true negatives when clipping is neither -// predicted nor detected. -TEST(ClippingPredictorEvaluatorTest, NeverDetectedAndNotPredicted) { +// Checks that, after removing existing expectations, simultaneously observing a +// detection and a prediction produces a false negative. +TEST(ClippingPredictionEvalTest, + FalseNegativeWithDetectPredictAfterRemoveExpectations) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); - evaluator.Observe(kNotDetected, kNotPredicted); - evaluator.Observe(kNotDetected, kNotPredicted); - evaluator.Observe(kNotDetected, kNotPredicted); - evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.true_negatives(), 4); - EXPECT_EQ(evaluator.true_positives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + // Set an expectation, then remove it. 
+ evaluator.Observe(kNotDetected, kPredicted); + evaluator.RemoveExpectations(); + const auto pre = evaluator.counters(); + + evaluator.Observe(kDetected, kPredicted); + const auto diff = evaluator.counters() - pre; + EXPECT_EQ(diff.false_negatives, 1); + EXPECT_EQ(diff.true_positives, 0); + EXPECT_EQ(diff.true_negatives, 0); + EXPECT_EQ(diff.false_positives, 0); } -// Checks that the evaluator detects a false negative when clipping is detected -// but not predicted. -TEST(ClippingPredictorEvaluatorTest, DetectedButNotPredicted) { +// Checks that the evaluator detects true negatives when clipping is neither +// predicted nor detected. +TEST(ClippingPredictionEvalTest, TrueNegativesWhenNeverDetectedOrPredicted) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kNotPredicted); evaluator.Observe(kNotDetected, kNotPredicted); evaluator.Observe(kNotDetected, kNotPredicted); - evaluator.Observe(kDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_negatives(), 1); - - EXPECT_EQ(evaluator.true_positives(), 0); - EXPECT_EQ(evaluator.true_negatives(), 3); - EXPECT_EQ(evaluator.false_positives(), 0); + evaluator.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(evaluator.counters().true_negatives, 4); } -// Checks that the evaluator does not detect a false positive when clipping is -// predicted but not detected until the observation period expires. -TEST(ClippingPredictorEvaluatorTest, - PredictedOnceAndNeverDetectedBeforeDeadline) { +// Checks that, until the observation period expires, the evaluator does not +// count a false positive when clipping is predicted and not detected. +TEST(ClippingPredictionEvalTest, PredictedOnceAndNeverDetectedBeforeDeadline) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.counters().false_positives, 0); evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.counters().false_positives, 0); evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_positives(), 1); - - EXPECT_EQ(evaluator.true_positives(), 0); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + EXPECT_EQ(evaluator.counters().true_positives, 0); + EXPECT_EQ(evaluator.counters().false_positives, 1); } -// Checks that the evaluator detects a false positive when clipping is predicted -// but detected after the observation period expires. -TEST(ClippingPredictorEvaluatorTest, PredictedOnceButDetectedAfterDeadline) { +// Checks that, after the observation period expires, the evaluator detects a +// false positive when clipping is predicted and detected. +TEST(ClippingPredictionEvalTest, PredictedOnceButDetectedAfterDeadline) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); evaluator.Observe(kNotDetected, kNotPredicted); evaluator.Observe(kNotDetected, kNotPredicted); evaluator.Observe(kNotDetected, kNotPredicted); evaluator.Observe(kDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_positives(), 1); - - EXPECT_EQ(evaluator.true_positives(), 0); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 1); + EXPECT_EQ(evaluator.counters().true_positives, 0); + EXPECT_EQ(evaluator.counters().false_positives, 1); } // Checks that a prediction followed by a detection counts as true positive. 
-TEST(ClippingPredictorEvaluatorTest, PredictedOnceAndThenImmediatelyDetected) { +TEST(ClippingPredictionEvalTest, PredictedOnceAndThenImmediatelyDetected) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); - EXPECT_EQ(evaluator.false_positives(), 0); evaluator.Observe(kDetected, kNotPredicted); - EXPECT_EQ(evaluator.true_positives(), 1); - - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + EXPECT_EQ(evaluator.counters().true_positives, 1); + EXPECT_EQ(evaluator.counters().false_positives, 0); } // Checks that a prediction followed by a delayed detection counts as true // positive if the delay is within the observation period. -TEST(ClippingPredictorEvaluatorTest, PredictedOnceAndDetectedBeforeDeadline) { +TEST(ClippingPredictionEvalTest, PredictedOnceAndDetectedBeforeDeadline) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); - EXPECT_EQ(evaluator.false_positives(), 0); evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_positives(), 0); evaluator.Observe(kDetected, kNotPredicted); - EXPECT_EQ(evaluator.true_positives(), 1); - - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + EXPECT_EQ(evaluator.counters().true_positives, 1); + EXPECT_EQ(evaluator.counters().false_positives, 0); } // Checks that a prediction followed by a delayed detection counts as true // positive if the delay equals the observation period. -TEST(ClippingPredictorEvaluatorTest, PredictedOnceAndDetectedAtDeadline) { +TEST(ClippingPredictionEvalTest, PredictedOnceAndDetectedAtDeadline) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); - EXPECT_EQ(evaluator.false_positives(), 0); evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_positives(), 0); evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_positives(), 0); evaluator.Observe(kDetected, kNotPredicted); - EXPECT_EQ(evaluator.true_positives(), 1); - - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + EXPECT_EQ(evaluator.counters().true_positives, 1); + EXPECT_EQ(evaluator.counters().false_positives, 0); } // Checks that a prediction followed by multiple adjacent detections within // the deadline counts as a single true positive and that, after the deadline, // a detection counts as a false negative. -TEST(ClippingPredictorEvaluatorTest, PredictedOnceAndDetectedMultipleTimes) { +TEST(ClippingPredictionEvalTest, PredictedOnceAndDetectedMultipleTimes) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); evaluator.Observe(kNotDetected, kNotPredicted); // Multiple detections.
evaluator.Observe(kDetected, kNotPredicted); - EXPECT_EQ(evaluator.true_positives(), 1); + EXPECT_EQ(evaluator.counters().true_positives, 1); + EXPECT_EQ(evaluator.counters().false_negatives, 0); + EXPECT_EQ(evaluator.counters().false_positives, 0); evaluator.Observe(kDetected, kNotPredicted); - EXPECT_EQ(evaluator.true_positives(), 1); + EXPECT_EQ(evaluator.counters().true_positives, 1); + EXPECT_EQ(evaluator.counters().false_negatives, 0); + EXPECT_EQ(evaluator.counters().false_positives, 0); // A detection outside of the observation period counts as false negative. evaluator.Observe(kDetected, kNotPredicted); - EXPECT_EQ(evaluator.false_negatives(), 1); - EXPECT_EQ(SumTrueFalsePositivesNegatives(evaluator), 2); - - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); + EXPECT_EQ(evaluator.counters().true_positives, 1); + EXPECT_EQ(evaluator.counters().false_negatives, 1); + EXPECT_EQ(evaluator.counters().false_positives, 0); } -// Checks that a false positive is added when clipping is detected after a too -// early prediction. -TEST(ClippingPredictorEvaluatorTest, +// Checks that when clipping is predicted multiple times, a prediction that is +// observed too early counts as a false positive, whereas the other predictions +// that are matched to a detection count as true positives. +TEST(ClippingPredictionEvalTest, PredictedMultipleTimesAndDetectedOnceAfterDeadline) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); // ---+ @@ -370,42 +404,40 @@ TEST(ClippingPredictorEvaluatorTest, evaluator.Observe(kNotDetected, kPredicted); // | evaluator.Observe(kNotDetected, kPredicted); // <--+ Not matched. // The time to match a detection after the first prediction expired. - EXPECT_EQ(evaluator.false_positives(), 1); + EXPECT_EQ(evaluator.counters().false_positives, 1); evaluator.Observe(kDetected, kNotPredicted); // The detection above does not match the first prediction because it happened // after the deadline of the 1st prediction. - EXPECT_EQ(evaluator.false_positives(), 1); - - EXPECT_EQ(evaluator.true_positives(), 3); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + EXPECT_EQ(evaluator.counters().false_positives, 1); + // However, the detection matches all the other predictions. + EXPECT_EQ(evaluator.counters().true_positives, 3); + EXPECT_EQ(evaluator.counters().false_negatives, 0); } // Checks that multiple consecutive predictions match the first detection // observed before the expected detection deadline expires. -TEST(ClippingPredictorEvaluatorTest, PredictedMultipleTimesAndDetectedOnce) { +TEST(ClippingPredictionEvalTest, PredictedMultipleTimesAndDetectedOnce) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); // --+ evaluator.Observe(kNotDetected, kPredicted); // | --+ evaluator.Observe(kNotDetected, kPredicted); // | | --+ evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ <-+ - EXPECT_EQ(evaluator.true_positives(), 3); + EXPECT_EQ(evaluator.counters().true_positives, 3); // The following observations do not generate any true negatives as they // belong to the observation period of the last prediction - for which a // detection has already been matched. 
- const int true_negatives = evaluator.true_negatives(); + const int true_negatives = evaluator.counters().true_negatives; evaluator.Observe(kNotDetected, kNotPredicted); evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.true_negatives(), true_negatives); + EXPECT_EQ(evaluator.counters().true_negatives, true_negatives); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + EXPECT_EQ(evaluator.counters().false_positives, 0); + EXPECT_EQ(evaluator.counters().false_negatives, 0); } // Checks that multiple consecutive predictions match the multiple detections // observed before the expected detection deadline expires. -TEST(ClippingPredictorEvaluatorTest, +TEST(ClippingPredictionEvalTest, PredictedMultipleTimesAndDetectedMultipleTimes) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); // --+ @@ -413,22 +445,21 @@ TEST(ClippingPredictorEvaluatorTest, evaluator.Observe(kNotDetected, kPredicted); // | | --+ evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ <-+ evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ - EXPECT_EQ(evaluator.true_positives(), 3); + EXPECT_EQ(evaluator.counters().true_positives, 3); // The following observation does not generate a true negative as it belongs // to the observation period of the last prediction - for which two detections // have already been matched. - const int true_negatives = evaluator.true_negatives(); + const int true_negatives = evaluator.counters().true_negatives; evaluator.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(evaluator.true_negatives(), true_negatives); + EXPECT_EQ(evaluator.counters().true_negatives, true_negatives); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + EXPECT_EQ(evaluator.counters().false_positives, 0); + EXPECT_EQ(evaluator.counters().false_negatives, 0); } // Checks that multiple consecutive predictions match all the detections // observed before the expected detection deadline expires. -TEST(ClippingPredictorEvaluatorTest, PredictedMultipleTimesAndAllDetected) { +TEST(ClippingPredictionEvalTest, PredictedMultipleTimesAndAllDetected) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); // --+ evaluator.Observe(kNotDetected, kPredicted); // | --+ @@ -436,16 +467,15 @@ TEST(ClippingPredictorEvaluatorTest, PredictedMultipleTimesAndAllDetected) { evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ <-+ evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ evaluator.Observe(kDetected, kNotPredicted); // <-+ - EXPECT_EQ(evaluator.true_positives(), 3); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + EXPECT_EQ(evaluator.counters().true_positives, 3); + + EXPECT_EQ(evaluator.counters().false_positives, 0); + EXPECT_EQ(evaluator.counters().false_negatives, 0); } // Checks that multiple non-consecutive predictions match all the detections // observed before the expected detection deadline expires. 
-TEST(ClippingPredictorEvaluatorTest, - PredictedMultipleTimesWithGapAndAllDetected) { +TEST(ClippingPredictionEvalTest, PredictedMultipleTimesWithGapAndAllDetected) { ClippingPredictorEvaluator evaluator(/*history_size=*/3); evaluator.Observe(kNotDetected, kPredicted); // --+ evaluator.Observe(kNotDetected, kNotPredicted); // | @@ -453,10 +483,10 @@ TEST(ClippingPredictorEvaluatorTest, evaluator.Observe(kDetected, kNotPredicted); // <-+ <-+ evaluator.Observe(kDetected, kNotPredicted); // <-+ evaluator.Observe(kDetected, kNotPredicted); // <-+ - EXPECT_EQ(evaluator.true_positives(), 2); - EXPECT_EQ(evaluator.true_negatives(), 0); - EXPECT_EQ(evaluator.false_positives(), 0); - EXPECT_EQ(evaluator.false_negatives(), 0); + EXPECT_EQ(evaluator.counters().true_positives, 2); + + EXPECT_EQ(evaluator.counters().false_positives, 0); + EXPECT_EQ(evaluator.counters().false_negatives, 0); } class ClippingPredictorEvaluatorPredictionIntervalParameterization @@ -467,16 +497,16 @@ class ClippingPredictorEvaluatorPredictionIntervalParameterization }; // Checks that the minimum prediction interval is returned if clipping is -// correctly predicted as soon as detected - i.e., no anticipation. +// correctly predicted just before clipping is detected - i.e., smallest +// anticipation. TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization, MinimumPredictionInterval) { ClippingPredictorEvaluator evaluator(history_size()); for (int i = 0; i < num_extra_observe_calls(); ++i) { EXPECT_EQ(evaluator.Observe(kNotDetected, kNotPredicted), absl::nullopt); } - absl::optional prediction_interval = - evaluator.Observe(kDetected, kPredicted); - EXPECT_THAT(prediction_interval, Optional(Eq(0))); + EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt); + EXPECT_THAT(evaluator.Observe(kDetected, kNotPredicted), Optional(Eq(1))); } // Checks that a prediction interval between the minimum and the maximum is @@ -491,9 +521,7 @@ TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization, EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt); EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt); EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt); - absl::optional prediction_interval = - evaluator.Observe(kDetected, kPredicted); - EXPECT_THAT(prediction_interval, Optional(Eq(3))); + EXPECT_THAT(evaluator.Observe(kDetected, kNotPredicted), Optional(Eq(3))); } // Checks that the maximum prediction interval is returned if clipping is @@ -507,9 +535,8 @@ TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization, for (int i = 0; i < history_size(); ++i) { EXPECT_EQ(evaluator.Observe(kNotDetected, kPredicted), absl::nullopt); } - absl::optional prediction_interval = - evaluator.Observe(kDetected, kPredicted); - EXPECT_THAT(prediction_interval, Optional(Eq(history_size()))); + EXPECT_THAT(evaluator.Observe(kDetected, kNotPredicted), + Optional(Eq(history_size()))); } // Checks that `Observe()` returns the prediction interval as soon as a true @@ -525,7 +552,7 @@ TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization, } // Observe a detection. absl::optional prediction_interval = - evaluator.Observe(kDetected, kPredicted); + evaluator.Observe(kDetected, kNotPredicted); EXPECT_TRUE(prediction_interval.has_value()); // `Observe()` does not return a prediction interval anymore during ongoing // detections observed while a detection is still expected. 
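Taken together, the pieces above compose into a small evaluation pipeline: `Observe()` accumulates the counters frame by frame, and `ComputeClippingPredictionMetrics()` condenses them into precision, recall and the F1 score. A minimal sketch of that wiring, with hypothetical glue code that is not part of this change (`kNumFrames`, `DetectedClipping()` and `PredictedClipping()` are illustrative stand-ins):

ClippingPredictorEvaluator evaluator(/*history_size=*/3);
for (int i = 0; i < kNumFrames; ++i) {
  // One call per processed frame, with flags from the detector and predictor.
  evaluator.Observe(DetectedClipping(i), PredictedClipping(i));
}
absl::optional<ClippingPredictionMetrics> metrics =
    ComputeClippingPredictionMetrics(evaluator.counters());
if (metrics.has_value()) {
  RTC_LOG(LS_INFO) << "Clipping prediction F1 score: " << metrics->f1_score;
}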
@@ -535,34 +562,202 @@ TEST_P(ClippingPredictorEvaluatorPredictionIntervalParameterization, } INSTANTIATE_TEST_SUITE_P( - ClippingPredictorEvaluatorTest, + ClippingPredictionEvalTest, ClippingPredictorEvaluatorPredictionIntervalParameterization, - ::testing::Combine(::testing::Values(0, 3, 5), ::testing::Values(7, 11))); + ::testing::Combine(::testing::Values(1, 3, 5), ::testing::Values(7, 11))); -// Checks that, when a detection is expected, the expectation is removed if and -// only if `Reset()` is called after a prediction is observed. -TEST(ClippingPredictorEvaluatorTest, NoFalsePositivesAfterReset) { +// Checks that, when a detection is expected, the expectation is not removed +// before the detection deadline expires unless `RemoveExpectations()` is +// called. +TEST(ClippingPredictionEvalTest, NoFalsePositivesAfterRemoveExpectations) { constexpr int kHistorySize = 2; - ClippingPredictorEvaluator with_reset(kHistorySize); - with_reset.Observe(kNotDetected, kPredicted); - with_reset.Reset(); - with_reset.Observe(kNotDetected, kNotPredicted); - with_reset.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(with_reset.true_positives(), 0); - EXPECT_EQ(with_reset.true_negatives(), 2); - EXPECT_EQ(with_reset.false_positives(), 0); - EXPECT_EQ(with_reset.false_negatives(), 0); - - ClippingPredictorEvaluator no_reset(kHistorySize); - no_reset.Observe(kNotDetected, kPredicted); - no_reset.Observe(kNotDetected, kNotPredicted); - no_reset.Observe(kNotDetected, kNotPredicted); - EXPECT_EQ(no_reset.true_positives(), 0); - EXPECT_EQ(no_reset.true_negatives(), 0); - EXPECT_EQ(no_reset.false_positives(), 1); - EXPECT_EQ(no_reset.false_negatives(), 0); + // Case 1: `RemoveExpectations()` is NOT called. + ClippingPredictorEvaluator e1(kHistorySize); + e1.Observe(kNotDetected, kPredicted); + ASSERT_EQ(e1.counters().true_negatives, 1); + e1.Observe(kNotDetected, kNotPredicted); + e1.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(e1.counters().true_positives, 0); + EXPECT_EQ(e1.counters().true_negatives, 1); + EXPECT_EQ(e1.counters().false_positives, 1); + EXPECT_EQ(e1.counters().false_negatives, 0); + + // Case 2: `RemoveExpectations()` is called. + ClippingPredictorEvaluator e2(kHistorySize); + e2.Observe(kNotDetected, kPredicted); + ASSERT_EQ(e2.counters().true_negatives, 1); + e2.RemoveExpectations(); + e2.Observe(kNotDetected, kNotPredicted); + e2.Observe(kNotDetected, kNotPredicted); + EXPECT_EQ(e2.counters().true_positives, 0); + EXPECT_EQ(e2.counters().true_negatives, 3); + EXPECT_EQ(e2.counters().false_positives, 0); + EXPECT_EQ(e2.counters().false_negatives, 0); +} + +class ComputeClippingPredictionMetricsParameterization + : public ::testing::TestWithParam { + protected: + int true_negatives() const { return GetParam(); } +}; + +// Checks that `ComputeClippingPredictionMetrics()` does not return metrics if +// precision cannot be defined - i.e., TP + FP is zero. +TEST_P(ComputeClippingPredictionMetricsParameterization, + NoMetricsWithUndefinedPrecision) { + EXPECT_EQ(ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/0, + /*true_negatives=*/true_negatives(), + /*false_positives=*/0, + /*false_negatives=*/0}), + absl::nullopt); + EXPECT_EQ(ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/0, + /*true_negatives=*/true_negatives(), + /*false_positives=*/0, + /*false_negatives=*/1}), + absl::nullopt); } +// Checks that `ComputeClippingPredictionMetrics()` does not return metrics if +// recall cannot be defined - i.e., TP + FN is zero. 
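+// Note that precision may still be defined in that case - e.g., TP = 0 with +// FP = 1 gives a precision of 0 - but metrics are returned only when both +// precision and recall are defined.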
+TEST_P(ComputeClippingPredictionMetricsParameterization, + NoMetricsWithUndefinedRecall) { + EXPECT_EQ(ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/0, + /*true_negatives=*/true_negatives(), + /*false_positives=*/0, + /*false_negatives=*/0}), + absl::nullopt); + EXPECT_EQ(ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/0, + /*true_negatives=*/true_negatives(), + /*false_positives=*/1, + /*false_negatives=*/0}), + absl::nullopt); +} + +// Checks that `ComputeClippingPredictionMetrics()` does not return metrics if +// the F1 score cannot be defined - i.e., P + R is zero. +TEST_P(ComputeClippingPredictionMetricsParameterization, + NoMetricsWithUndefinedF1Score) { + EXPECT_EQ(ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/0, + /*true_negatives=*/true_negatives(), + /*false_positives=*/1, + /*false_negatives=*/1}), + absl::nullopt); +} + +// Checks that the highest precision is reached when there are no false +// positives. +TEST_P(ComputeClippingPredictionMetricsParameterization, HighestPrecision) { + EXPECT_THAT(ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/1, + /*true_negatives=*/true_negatives(), + /*false_positives=*/0, + /*false_negatives=*/1}), + Optional(Field(&ClippingPredictionMetrics::precision, Eq(1.0f)))); +} + +// Checks that the highest recall is reached when there are no false +// negatives. +TEST_P(ComputeClippingPredictionMetricsParameterization, HighestRecall) { + EXPECT_THAT(ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/1, + /*true_negatives=*/true_negatives(), + /*false_positives=*/1, + /*false_negatives=*/0}), + Optional(Field(&ClippingPredictionMetrics::recall, Eq(1.0f)))); +} + +// Checks that 50% precision and 50% recall are reached when the numbers of true +// positives, false positives and false negatives are equal. +TEST_P(ComputeClippingPredictionMetricsParameterization, + PrecisionAndRecall50Percent) { + absl::optional<ClippingPredictionMetrics> metrics = + ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/42, + /*true_negatives=*/true_negatives(), + /*false_positives=*/42, + /*false_negatives=*/42}); + ASSERT_TRUE(metrics.has_value()); + EXPECT_EQ(metrics->precision, 0.5f); + EXPECT_EQ(metrics->recall, 0.5f); + EXPECT_EQ(metrics->f1_score, 0.5f); +} + +// Checks that the highest precision, recall and F1 score are jointly reached +// when there are no false positives and no false negatives. +TEST_P(ComputeClippingPredictionMetricsParameterization, + HighestPrecisionRecallF1Score) { + absl::optional<ClippingPredictionMetrics> metrics = + ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/123, + /*true_negatives=*/true_negatives(), + /*false_positives=*/0, + /*false_negatives=*/0}); + ASSERT_TRUE(metrics.has_value()); + EXPECT_EQ(metrics->precision, 1.0f); + EXPECT_EQ(metrics->recall, 1.0f); + EXPECT_EQ(metrics->f1_score, 1.0f); +} + +// Checks that precision is lower than recall when there are more false +// positives than false negatives. +TEST_P(ComputeClippingPredictionMetricsParameterization, + PrecisionLowerThanRecall) { + absl::optional<ClippingPredictionMetrics> metrics = + ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/1, + /*true_negatives=*/true_negatives(), + /*false_positives=*/8, + /*false_negatives=*/1}); + ASSERT_TRUE(metrics.has_value()); + EXPECT_LT(metrics->precision, metrics->recall); +} + +// Checks that precision is greater than recall when there are fewer false +// positives than false negatives.
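+// For instance, with the counts used below - TP = 1, FP = 1 and FN = 8 - +// precision is 1 / 2 while recall is only 1 / 9.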
+TEST_P(ComputeClippingPredictionMetricsParameterization, + PrecisionGreaterThanRecall) { + absl::optional<ClippingPredictionMetrics> metrics = + ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/1, + /*true_negatives=*/true_negatives(), + /*false_positives=*/1, + /*false_negatives=*/8}); + ASSERT_TRUE(metrics.has_value()); + EXPECT_GT(metrics->precision, metrics->recall); +} + +// Checks that swapping precision and recall does not change the F1 score. +TEST_P(ComputeClippingPredictionMetricsParameterization, SameF1Score) { + absl::optional<ClippingPredictionMetrics> m1 = + ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/1, + /*true_negatives=*/true_negatives(), + /*false_positives=*/8, + /*false_negatives=*/1}); + absl::optional<ClippingPredictionMetrics> m2 = + ComputeClippingPredictionMetrics( + /*counters=*/{/*true_positives=*/1, + /*true_negatives=*/true_negatives(), + /*false_positives=*/1, + /*false_negatives=*/8}); + // Preconditions. + ASSERT_TRUE(m1.has_value()); + ASSERT_TRUE(m2.has_value()); + ASSERT_EQ(m1->precision, m2->recall); + ASSERT_EQ(m1->recall, m2->precision); + // Same F1 score. + EXPECT_EQ(m1->f1_score, m2->f1_score); +} + +INSTANTIATE_TEST_SUITE_P(ClippingPredictionEvalTest, + ComputeClippingPredictionMetricsParameterization, + ::testing::Values(0, 1, 11)); + } // namespace } // namespace webrtc diff --git a/modules/audio_processing/agc/gain_control.h b/modules/audio_processing/agc/gain_control.h index f8c706b9ab..389b2114af 100644 --- a/modules/audio_processing/agc/gain_control.h +++ b/modules/audio_processing/agc/gain_control.h @@ -20,12 +20,12 @@ namespace webrtc { // Recommended to be enabled on the client-side. class GainControl { public: - // When an analog mode is set, this must be called prior to |ProcessStream()| + // When an analog mode is set, this must be called prior to `ProcessStream()` // to pass the current analog level from the audio HAL. Must be within the - // range provided to |set_analog_level_limits()|. + // range provided to `set_analog_level_limits()`. virtual int set_stream_analog_level(int level) = 0; - // When an analog mode is set, this should be called after |ProcessStream()| + // When an analog mode is set, this should be called after `ProcessStream()` // to obtain the recommended new analog level for the audio HAL. It is the // user's responsibility to apply this level. virtual int stream_analog_level() const = 0; @@ -33,7 +33,7 @@ class GainControl { enum Mode { // Adaptive mode intended for use if an analog volume control is available // on the capture device. It will require the user to provide coupling - // between the OS mixer controls and AGC through the |stream_analog_level()| + // between the OS mixer controls and AGC through the `stream_analog_level()` // functions. // // It consists of an analog gain prescription for the audio device and a @@ -61,7 +61,7 @@ class GainControl { virtual int set_mode(Mode mode) = 0; virtual Mode mode() const = 0; - // Sets the target peak |level| (or envelope) of the AGC in dBFs (decibels + // Sets the target peak `level` (or envelope) of the AGC in dBFs (decibels // from digital full-scale). The convention is to use positive values. For // instance, passing in a value of 3 corresponds to -3 dBFs, or a target // level 3 dB below full-scale. Limited to [0, 31]. @@ -71,7 +71,7 @@ class GainControl { virtual int set_target_level_dbfs(int level) = 0; virtual int target_level_dbfs() const = 0; - // Sets the maximum |gain| the digital compression stage may apply, in dB.
A + // Sets the maximum `gain` the digital compression stage may apply, in dB. A // higher number corresponds to greater compression, while a value of 0 will // leave the signal uncompressed. Limited to [0, 90]. virtual int set_compression_gain_db(int gain) = 0; @@ -83,7 +83,7 @@ class GainControl { virtual int enable_limiter(bool enable) = 0; virtual bool is_limiter_enabled() const = 0; - // Sets the |minimum| and |maximum| analog levels of the audio capture device. + // Sets the `minimum` and `maximum` analog levels of the audio capture device. // Must be set if and only if an analog mode is used. Limited to [0, 65535]. virtual int set_analog_level_limits(int minimum, int maximum) = 0; virtual int analog_level_minimum() const = 0; diff --git a/modules/audio_processing/agc/legacy/analog_agc.cc b/modules/audio_processing/agc/legacy/analog_agc.cc index b53e3f97a3..e40a3f1629 100644 --- a/modules/audio_processing/agc/legacy/analog_agc.cc +++ b/modules/audio_processing/agc/legacy/analog_agc.cc @@ -160,7 +160,7 @@ int WebRtcAgc_AddMic(void* state, /* apply slowly varying digital gain */ if (stt->micVol > stt->maxAnalog) { - /* |maxLevel| is strictly >= |micVol|, so this condition should be + /* `maxLevel` is strictly >= `micVol`, so this condition should be * satisfied here, ensuring there is no divide-by-zero. */ RTC_DCHECK_GT(stt->maxLevel, stt->maxAnalog); diff --git a/modules/audio_processing/agc/legacy/digital_agc.cc b/modules/audio_processing/agc/legacy/digital_agc.cc index 185e84981c..4cd86acba8 100644 --- a/modules/audio_processing/agc/legacy/digital_agc.cc +++ b/modules/audio_processing/agc/legacy/digital_agc.cc @@ -79,10 +79,9 @@ int32_t WebRtcAgc_CalculateGainTable(int32_t* gainTable, // Q16 uint16_t constMaxGain; uint16_t tmpU16, intPart, fracPart; const int16_t kCompRatio = 3; - const int16_t kSoftLimiterLeft = 1; int16_t limiterOffset = 0; // Limiter offset int16_t limiterIdx, limiterLvlX; - int16_t constLinApprox, zeroGainLvl, maxGain, diffGain; + int16_t constLinApprox, maxGain, diffGain; int16_t i, tmp16, tmp16no1; int zeros, zerosScale; @@ -98,17 +97,11 @@ int32_t WebRtcAgc_CalculateGainTable(int32_t* gainTable, // Q16 WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio); maxGain = WEBRTC_SPL_MAX(tmp16no1, (analogTarget - targetLevelDbfs)); tmp32no1 = maxGain * kCompRatio; - zeroGainLvl = digCompGaindB; - zeroGainLvl -= WebRtcSpl_DivW32W16ResW16(tmp32no1 + ((kCompRatio - 1) >> 1), - kCompRatio - 1); if ((digCompGaindB <= analogTarget) && (limiterEnable)) { - zeroGainLvl += (analogTarget - digCompGaindB + kSoftLimiterLeft); limiterOffset = 0; } - // Calculate the difference between maximum gain and gain at 0dB0v: - // diffGain = maxGain + (compRatio-1)*zeroGainLvl/compRatio - // = (compRatio-1)*digCompGaindB/compRatio + // Calculate the difference between maximum gain and gain at 0dB0v tmp32no1 = digCompGaindB * (kCompRatio - 1); diffGain = WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio); @@ -191,9 +184,9 @@ int32_t WebRtcAgc_CalculateGainTable(int32_t* gainTable, // Q16 numFIX -= (int32_t)logApprox * diffGain; // Q14 // Calculate ratio - // Shift |numFIX| as much as possible. - // Ensure we avoid wrap-around in |den| as well. - if (numFIX > (den >> 8) || -numFIX > (den >> 8)) { // |den| is Q8. + // Shift `numFIX` as much as possible. + // Ensure we avoid wrap-around in `den` as well. + if (numFIX > (den >> 8) || -numFIX > (den >> 8)) { // `den` is Q8. 
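// `WebRtcSpl_NormW32()` returns the number of left shifts needed to // normalize a 32-bit value without overflow.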
zeros = WebRtcSpl_NormW32(numFIX); } else { zeros = WebRtcSpl_NormW32(den) + 8; @@ -294,15 +287,12 @@ int32_t WebRtcAgc_ComputeDigitalGains(DigitalAgc* stt, int16_t gate, gain_adj; int16_t k; size_t n, L; - int16_t L2; // samples/subframe // determine number of samples per ms if (FS == 8000) { L = 8; - L2 = 3; } else if (FS == 16000 || FS == 32000 || FS == 48000) { L = 16; - L2 = 4; } else { return -1; } diff --git a/modules/audio_processing/agc/loudness_histogram.cc b/modules/audio_processing/agc/loudness_histogram.cc index 4775ff772e..b0a1f53b97 100644 --- a/modules/audio_processing/agc/loudness_histogram.cc +++ b/modules/audio_processing/agc/loudness_histogram.cc @@ -114,7 +114,7 @@ void LoudnessHistogram::RemoveOldestEntryAndUpdate() { void LoudnessHistogram::RemoveTransient() { // Don't expect to be here if high-activity region is longer than - // |kTransientWidthThreshold| or there has not been any transient. + // `kTransientWidthThreshold` or there has not been any transient. RTC_DCHECK_LE(len_high_activity_, kTransientWidthThreshold); int index = (buffer_index_ > 0) ? (buffer_index_ - 1) : len_circular_buffer_ - 1; diff --git a/modules/audio_processing/agc/loudness_histogram.h b/modules/audio_processing/agc/loudness_histogram.h index badd443a5d..51b38714c2 100644 --- a/modules/audio_processing/agc/loudness_histogram.h +++ b/modules/audio_processing/agc/loudness_histogram.h @@ -25,7 +25,7 @@ class LoudnessHistogram { static LoudnessHistogram* Create(); // Create a sliding LoudnessHistogram, i.e. the histogram represents the last - // |window_size| samples. + // `window_size` samples. static LoudnessHistogram* Create(int window_size); ~LoudnessHistogram(); @@ -49,7 +49,7 @@ class LoudnessHistogram { LoudnessHistogram(); explicit LoudnessHistogram(int window); - // Find the histogram bin associated with the given |rms|. + // Find the histogram bin associated with the given `rms`. int GetBinIndex(double rms); void RemoveOldestEntryAndUpdate(); @@ -63,10 +63,10 @@ class LoudnessHistogram { // Number of times the histogram is updated int num_updates_; // Audio content, this should be equal to the sum of the components of - // |bin_count_q10_|. + // `bin_count_q10_`. int64_t audio_content_q10_; - // LoudnessHistogram of input RMS in Q10 with |kHistSize_| bins. In each + // LoudnessHistogram of input RMS in Q10 with `kHistSize_` bins. In each // 'Update(),' we increment the associated histogram-bin with the given // probability. The increment is implemented in Q10 to avoid rounding errors. 
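// (Q10 means the value is stored scaled by 2^10 = 1024; e.g., an update with // probability 0.25 increments a bin by 256.)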
int64_t bin_count_q10_[kHistSize]; diff --git a/modules/audio_processing/agc/mock_agc.h b/modules/audio_processing/agc/mock_agc.h index 0ef41c6e52..3080e1563c 100644 --- a/modules/audio_processing/agc/mock_agc.h +++ b/modules/audio_processing/agc/mock_agc.h @@ -11,6 +11,7 @@ #ifndef MODULES_AUDIO_PROCESSING_AGC_MOCK_AGC_H_ #define MODULES_AUDIO_PROCESSING_AGC_MOCK_AGC_H_ +#include "api/array_view.h" #include "modules/audio_processing/agc/agc.h" #include "test/gmock.h" @@ -19,10 +20,7 @@ namespace webrtc { class MockAgc : public Agc { public: virtual ~MockAgc() {} - MOCK_METHOD(void, - Process, - (const int16_t* audio, size_t length, int sample_rate_hz), - (override)); + MOCK_METHOD(void, Process, (rtc::ArrayView audio), (override)); MOCK_METHOD(bool, GetRmsErrorDb, (int* error), (override)); MOCK_METHOD(void, Reset, (), (override)); MOCK_METHOD(int, set_target_level_dbfs, (int level), (override)); diff --git a/modules/audio_processing/agc2/BUILD.gn b/modules/audio_processing/agc2/BUILD.gn index 4c6cfab576..e12252806b 100644 --- a/modules/audio_processing/agc2/BUILD.gn +++ b/modules/audio_processing/agc2/BUILD.gn @@ -17,10 +17,10 @@ group("agc2") { rtc_library("adaptive_digital") { sources = [ - "adaptive_agc.cc", - "adaptive_agc.h", "adaptive_digital_gain_applier.cc", "adaptive_digital_gain_applier.h", + "adaptive_digital_gain_controller.cc", + "adaptive_digital_gain_controller.h", "adaptive_mode_level_estimator.cc", "adaptive_mode_level_estimator.h", "saturation_protector.cc", @@ -29,6 +29,11 @@ rtc_library("adaptive_digital") { "saturation_protector_buffer.h", ] + visibility = [ + "..:gain_controller2", + "./*", + ] + configs += [ "..:apm_debug_dump" ] deps = [ @@ -36,7 +41,7 @@ rtc_library("adaptive_digital") { ":cpu_features", ":gain_applier", ":noise_level_estimator", - ":rnn_vad_with_level", + ":vad_wrapper", "..:api", "..:apm_logging", "..:audio_frame_view", @@ -79,6 +84,12 @@ rtc_library("fixed_digital") { "limiter.h", ] + visibility = [ + "..:gain_controller2", + "../../audio_mixer:audio_mixer_impl", + "./*", + ] + configs += [ "..:apm_debug_dump" ] deps = [ @@ -90,6 +101,7 @@ rtc_library("fixed_digital") { "../../../rtc_base:checks", "../../../rtc_base:gtest_prod", "../../../rtc_base:rtc_base_approved", + "../../../rtc_base:safe_conversions", "../../../rtc_base:safe_minmax", "../../../system_wrappers:metrics", ] @@ -100,6 +112,12 @@ rtc_library("gain_applier") { "gain_applier.cc", "gain_applier.h", ] + + visibility = [ + "..:gain_controller2", + "./*", + ] + deps = [ ":common", "..:audio_frame_view", @@ -110,34 +128,31 @@ rtc_library("gain_applier") { rtc_library("noise_level_estimator") { sources = [ - "down_sampler.cc", - "down_sampler.h", "noise_level_estimator.cc", "noise_level_estimator.h", - "noise_spectrum_estimator.cc", - "noise_spectrum_estimator.h", - "signal_classifier.cc", - "signal_classifier.h", ] + visibility = [ "./*" ] deps = [ ":biquad_filter", "..:apm_logging", "..:audio_frame_view", "../../../api:array_view", - "../../../common_audio", - "../../../common_audio/third_party/ooura:fft_size_128", "../../../rtc_base:checks", - "../../../rtc_base:macromagic", "../../../system_wrappers", ] configs += [ "..:apm_debug_dump" ] } -rtc_library("rnn_vad_with_level") { +rtc_library("vad_wrapper") { sources = [ - "vad_with_level.cc", - "vad_with_level.h", + "vad_wrapper.cc", + "vad_wrapper.h", + ] + + visibility = [ + "..:gain_controller2", + "./*", ] defines = [] @@ -163,7 +178,12 @@ rtc_library("cpu_features") { "cpu_features.cc", "cpu_features.h", ] - visibility = [ "./*" 
] + + visibility = [ + "..:gain_controller2", + "./*", + ] + deps = [ "../../../rtc_base:stringutils", "../../../rtc_base/system:arch", @@ -187,6 +207,7 @@ rtc_library("adaptive_digital_unittests") { ":common", ":gain_applier", ":test_utils", + "..:api", "..:apm_logging", "..:audio_frame_view", "../../../api:array_view", @@ -241,10 +262,7 @@ rtc_library("noise_estimator_unittests") { testonly = true configs += [ "..:apm_debug_dump" ] - sources = [ - "noise_level_estimator_unittest.cc", - "signal_classifier_unittest.cc", - ] + sources = [ "noise_level_estimator_unittest.cc" ] deps = [ ":noise_level_estimator", ":test_utils", @@ -258,13 +276,14 @@ rtc_library("noise_estimator_unittests") { ] } -rtc_library("rnn_vad_with_level_unittests") { +rtc_library("vad_wrapper_unittests") { testonly = true - sources = [ "vad_with_level_unittest.cc" ] + sources = [ "vad_wrapper_unittest.cc" ] deps = [ ":common", - ":rnn_vad_with_level", + ":vad_wrapper", "..:audio_frame_view", + "../../../rtc_base:checks", "../../../rtc_base:gunit_helpers", "../../../rtc_base:safe_compare", "../../../test:test_support", diff --git a/modules/audio_processing/agc2/adaptive_agc.cc b/modules/audio_processing/agc2/adaptive_agc.cc deleted file mode 100644 index 3fc9008db1..0000000000 --- a/modules/audio_processing/agc2/adaptive_agc.cc +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "modules/audio_processing/agc2/adaptive_agc.h" - -#include "common_audio/include/audio_util.h" -#include "modules/audio_processing/agc2/cpu_features.h" -#include "modules/audio_processing/agc2/vad_with_level.h" -#include "modules/audio_processing/logging/apm_data_dumper.h" -#include "rtc_base/checks.h" -#include "rtc_base/logging.h" - -namespace webrtc { -namespace { - -using AdaptiveDigitalConfig = - AudioProcessing::Config::GainController2::AdaptiveDigital; -using NoiseEstimatorType = - AudioProcessing::Config::GainController2::NoiseEstimator; - -// Detects the available CPU features and applies any kill-switches. 
-AvailableCpuFeatures GetAllowedCpuFeatures( - const AdaptiveDigitalConfig& config) { - AvailableCpuFeatures features = GetAvailableCpuFeatures(); - if (!config.sse2_allowed) { - features.sse2 = false; - } - if (!config.avx2_allowed) { - features.avx2 = false; - } - if (!config.neon_allowed) { - features.neon = false; - } - return features; -} - -std::unique_ptr CreateNoiseLevelEstimator( - NoiseEstimatorType estimator_type, - ApmDataDumper* apm_data_dumper) { - switch (estimator_type) { - case NoiseEstimatorType::kStationaryNoise: - return CreateStationaryNoiseEstimator(apm_data_dumper); - case NoiseEstimatorType::kNoiseFloor: - return CreateNoiseFloorEstimator(apm_data_dumper); - } -} - -} // namespace - -AdaptiveAgc::AdaptiveAgc(ApmDataDumper* apm_data_dumper, - const AdaptiveDigitalConfig& config) - : speech_level_estimator_(apm_data_dumper, - config.adjacent_speech_frames_threshold), - vad_(config.vad_reset_period_ms, GetAllowedCpuFeatures(config)), - gain_controller_(apm_data_dumper, - config.adjacent_speech_frames_threshold, - config.max_gain_change_db_per_second, - config.max_output_noise_level_dbfs, - config.dry_run), - apm_data_dumper_(apm_data_dumper), - noise_level_estimator_( - CreateNoiseLevelEstimator(config.noise_estimator, apm_data_dumper)), - saturation_protector_( - CreateSaturationProtector(kSaturationProtectorInitialHeadroomDb, - kSaturationProtectorExtraHeadroomDb, - config.adjacent_speech_frames_threshold, - apm_data_dumper)) { - RTC_DCHECK(apm_data_dumper); - RTC_DCHECK(noise_level_estimator_); - RTC_DCHECK(saturation_protector_); - if (!config.use_saturation_protector) { - RTC_LOG(LS_WARNING) << "The saturation protector cannot be disabled."; - } -} - -AdaptiveAgc::~AdaptiveAgc() = default; - -void AdaptiveAgc::Initialize(int sample_rate_hz, int num_channels) { - gain_controller_.Initialize(sample_rate_hz, num_channels); -} - -void AdaptiveAgc::Process(AudioFrameView frame, float limiter_envelope) { - AdaptiveDigitalGainApplier::FrameInfo info; - - VadLevelAnalyzer::Result vad_result = vad_.AnalyzeFrame(frame); - info.speech_probability = vad_result.speech_probability; - apm_data_dumper_->DumpRaw("agc2_speech_probability", - vad_result.speech_probability); - apm_data_dumper_->DumpRaw("agc2_input_rms_dbfs", vad_result.rms_dbfs); - apm_data_dumper_->DumpRaw("agc2_input_peak_dbfs", vad_result.peak_dbfs); - - speech_level_estimator_.Update(vad_result); - info.speech_level_dbfs = speech_level_estimator_.level_dbfs(); - info.speech_level_reliable = speech_level_estimator_.IsConfident(); - apm_data_dumper_->DumpRaw("agc2_speech_level_dbfs", info.speech_level_dbfs); - apm_data_dumper_->DumpRaw("agc2_speech_level_reliable", - info.speech_level_reliable); - - info.noise_rms_dbfs = noise_level_estimator_->Analyze(frame); - apm_data_dumper_->DumpRaw("agc2_noise_rms_dbfs", info.noise_rms_dbfs); - - saturation_protector_->Analyze(info.speech_probability, vad_result.peak_dbfs, - info.speech_level_dbfs); - info.headroom_db = saturation_protector_->HeadroomDb(); - apm_data_dumper_->DumpRaw("agc2_headroom_db", info.headroom_db); - - info.limiter_envelope_dbfs = FloatS16ToDbfs(limiter_envelope); - apm_data_dumper_->DumpRaw("agc2_limiter_envelope_dbfs", - info.limiter_envelope_dbfs); - - gain_controller_.Process(info, frame); -} - -void AdaptiveAgc::HandleInputGainChange() { - speech_level_estimator_.Reset(); - saturation_protector_->Reset(); -} - -} // namespace webrtc diff --git a/modules/audio_processing/agc2/adaptive_digital_gain_applier.cc 
b/modules/audio_processing/agc2/adaptive_digital_gain_applier.cc index e59b110efe..a34f598874 100644 --- a/modules/audio_processing/agc2/adaptive_digital_gain_applier.cc +++ b/modules/audio_processing/agc2/adaptive_digital_gain_applier.cc @@ -23,31 +23,38 @@ namespace webrtc { namespace { +using AdaptiveDigitalConfig = + AudioProcessing::Config::GainController2::AdaptiveDigital; + constexpr int kHeadroomHistogramMin = 0; constexpr int kHeadroomHistogramMax = 50; +constexpr int kGainDbHistogramMax = 30; -// This function maps input level to desired applied gain. We want to -// boost the signal so that peaks are at -kHeadroomDbfs. We can't -// apply more than kMaxGainDb gain. -float ComputeGainDb(float input_level_dbfs) { - // If the level is very low, boost it as much as we can. - if (input_level_dbfs < -(kHeadroomDbfs + kMaxGainDb)) { - return kMaxGainDb; +// Computes the gain for `input_level_dbfs` to reach `-config.headroom_db`. +// Clamps the gain in [0, `config.max_gain_db`]. `config.headroom_db` is a +// safety margin to allow transient peaks to exceed the target peak level +// without clipping. +float ComputeGainDb(float input_level_dbfs, + const AdaptiveDigitalConfig& config) { + // If the level is very low, apply the maximum gain. + if (input_level_dbfs < -(config.headroom_db + config.max_gain_db)) { + return config.max_gain_db; } // We expect to end up here most of the time: the level is below // -headroom, but we can boost it to -headroom. - if (input_level_dbfs < -kHeadroomDbfs) { - return -kHeadroomDbfs - input_level_dbfs; + if (input_level_dbfs < -config.headroom_db) { + return -config.headroom_db - input_level_dbfs; } - // Otherwise, the level is too high and we can't boost. - RTC_DCHECK_GE(input_level_dbfs, -kHeadroomDbfs); - return 0.f; + // The level is too high and we can't boost. + RTC_DCHECK_GE(input_level_dbfs, -config.headroom_db); + return 0.0f; } -// Returns `target_gain` if the output noise level is below -// `max_output_noise_level_dbfs`; otherwise returns a capped gain so that the -// output noise level equals `max_output_noise_level_dbfs`. -float LimitGainByNoise(float target_gain, +// Returns `target_gain_db` if applying such a gain to `input_noise_level_dbfs` +// does not exceed `max_output_noise_level_dbfs`. Otherwise lowers and returns +// `target_gain_db` so that the output noise level equals +// `max_output_noise_level_dbfs`. 
+float LimitGainByNoise(float target_gain_db, float input_noise_level_dbfs, float max_output_noise_level_dbfs, ApmDataDumper& apm_data_dumper) { @@ -55,24 +62,25 @@ float LimitGainByNoise(float target_gain, max_output_noise_level_dbfs - input_noise_level_dbfs; apm_data_dumper.DumpRaw("agc2_adaptive_gain_applier_max_allowed_gain_db", max_allowed_gain_db); - return std::min(target_gain, std::max(max_allowed_gain_db, 0.f)); + return std::min(target_gain_db, std::max(max_allowed_gain_db, 0.0f)); } -float LimitGainByLowConfidence(float target_gain, - float last_gain, +float LimitGainByLowConfidence(float target_gain_db, + float last_gain_db, float limiter_audio_level_dbfs, bool estimate_is_confident) { if (estimate_is_confident || limiter_audio_level_dbfs <= kLimiterThresholdForAgcGainDbfs) { - return target_gain; + return target_gain_db; } - const float limiter_level_before_gain = limiter_audio_level_dbfs - last_gain; + const float limiter_level_dbfs_before_gain = + limiter_audio_level_dbfs - last_gain_db; - // Compute a new gain so that `limiter_level_before_gain` + `new_target_gain` - // is not great than `kLimiterThresholdForAgcGainDbfs`. - const float new_target_gain = std::max( - kLimiterThresholdForAgcGainDbfs - limiter_level_before_gain, 0.f); - return std::min(new_target_gain, target_gain); + // Compute a new gain so that `limiter_level_dbfs_before_gain` + + // `new_target_gain_db` is not greater than `kLimiterThresholdForAgcGainDbfs`. + const float new_target_gain_db = std::max( + kLimiterThresholdForAgcGainDbfs - limiter_level_dbfs_before_gain, 0.0f); + return std::min(new_target_gain_db, target_gain_db); } // Computes how the gain should change during this frame. @@ -86,7 +94,7 @@ float ComputeGainChangeThisFrameDb(float target_gain_db, RTC_DCHECK_GT(max_gain_increase_db, 0); float target_gain_difference_db = target_gain_db - last_gain_db; if (!gain_increase_allowed) { - target_gain_difference_db = std::min(target_gain_difference_db, 0.f); + target_gain_difference_db = std::min(target_gain_difference_db, 0.0f); } return rtc::SafeClamp(target_gain_difference_db, -max_gain_decrease_db, max_gain_increase_db); @@ -98,7 +106,7 @@ void CopyAudio(AudioFrameView src, RTC_DCHECK_GT(src.num_channels(), 0); RTC_DCHECK_GT(src.samples_per_channel(), 0); RTC_DCHECK_EQ(dst.size(), src.num_channels()); - for (size_t c = 0; c < src.num_channels(); ++c) { + for (int c = 0; c < src.num_channels(); ++c) { rtc::ArrayView channel_view = src.channel(c); RTC_DCHECK_EQ(channel_view.size(), src.samples_per_channel()); RTC_DCHECK_EQ(dst[c].size(), src.samples_per_channel()); @@ -110,32 +118,30 @@ void CopyAudio(AudioFrameView src, AdaptiveDigitalGainApplier::AdaptiveDigitalGainApplier( ApmDataDumper* apm_data_dumper, - int adjacent_speech_frames_threshold, - float max_gain_change_db_per_second, - float max_output_noise_level_dbfs, - bool dry_run) + const AudioProcessing::Config::GainController2::AdaptiveDigital& config, + int sample_rate_hz, + int num_channels) : apm_data_dumper_(apm_data_dumper), gain_applier_( /*hard_clip_samples=*/false, - /*initial_gain_factor=*/DbToRatio(kInitialAdaptiveDigitalGainDb)), - adjacent_speech_frames_threshold_(adjacent_speech_frames_threshold), - max_gain_change_db_per_10ms_(max_gain_change_db_per_second * - kFrameDurationMs / 1000.f), - max_output_noise_level_dbfs_(max_output_noise_level_dbfs), - dry_run_(dry_run), + /*initial_gain_factor=*/DbToRatio(config.initial_gain_db)), + config_(config), + max_gain_change_db_per_10ms_(config_.max_gain_change_db_per_second * 
kFrameDurationMs / 1000.0f), calls_since_last_gain_log_(0), - frames_to_gain_increase_allowed_(adjacent_speech_frames_threshold_), - last_gain_db_(kInitialAdaptiveDigitalGainDb) { - RTC_DCHECK_GT(max_gain_change_db_per_second, 0.0f); + frames_to_gain_increase_allowed_( + config_.adjacent_speech_frames_threshold), + last_gain_db_(config_.initial_gain_db) { + RTC_DCHECK_GT(max_gain_change_db_per_10ms_, 0.0f); RTC_DCHECK_GE(frames_to_gain_increase_allowed_, 1); - RTC_DCHECK_GE(max_output_noise_level_dbfs_, -90.0f); - RTC_DCHECK_LE(max_output_noise_level_dbfs_, 0.0f); - Initialize(/*sample_rate_hz=*/48000, /*num_channels=*/1); + RTC_DCHECK_GE(config_.max_output_noise_level_dbfs, -90.0f); + RTC_DCHECK_LE(config_.max_output_noise_level_dbfs, 0.0f); + Initialize(sample_rate_hz, num_channels); } void AdaptiveDigitalGainApplier::Initialize(int sample_rate_hz, int num_channels) { - if (!dry_run_) { + if (!config_.dry_run) { return; } RTC_DCHECK_GT(sample_rate_hz, 0); @@ -159,7 +165,7 @@ void AdaptiveDigitalGainApplier::Initialize(int sample_rate_hz, void AdaptiveDigitalGainApplier::Process(const FrameInfo& info, AudioFrameView frame) { - RTC_DCHECK_GE(info.speech_level_dbfs, -150.f); + RTC_DCHECK_GE(info.speech_level_dbfs, -150.0f); RTC_DCHECK_GE(frame.num_channels(), 1); RTC_DCHECK( frame.samples_per_channel() == 80 || frame.samples_per_channel() == 160 || @@ -172,15 +178,16 @@ void AdaptiveDigitalGainApplier::Process(const FrameInfo& info, const float input_level_dbfs = info.speech_level_dbfs + info.headroom_db; const float target_gain_db = LimitGainByLowConfidence( - LimitGainByNoise(ComputeGainDb(input_level_dbfs), info.noise_rms_dbfs, - max_output_noise_level_dbfs_, *apm_data_dumper_), + LimitGainByNoise(ComputeGainDb(input_level_dbfs, config_), + info.noise_rms_dbfs, config_.max_output_noise_level_dbfs, + *apm_data_dumper_), last_gain_db_, info.limiter_envelope_dbfs, info.speech_level_reliable); // Forbid increasing the gain until enough adjacent speech frames are // observed. bool first_confident_speech_frame = false; if (info.speech_probability < kVadConfidenceThreshold) { - frames_to_gain_increase_allowed_ = adjacent_speech_frames_threshold_; + frames_to_gain_increase_allowed_ = config_.adjacent_speech_frames_threshold; } else if (frames_to_gain_increase_allowed_ > 0) { frames_to_gain_increase_allowed_--; first_confident_speech_frame = frames_to_gain_increase_allowed_ == 0; @@ -196,7 +203,7 @@ void AdaptiveDigitalGainApplier::Process(const FrameInfo& info, // No gain increase happened while waiting for a long enough speech // sequence. Therefore, temporarily allow a faster gain increase. RTC_DCHECK(gain_increase_allowed); - max_gain_increase_db *= adjacent_speech_frames_threshold_; + max_gain_increase_db *= config_.adjacent_speech_frames_threshold; } const float gain_change_this_frame_db = ComputeGainChangeThisFrameDb( @@ -217,7 +224,7 @@ void AdaptiveDigitalGainApplier::Process(const FrameInfo& info, } // Modify `frame` only if not running in "dry run" mode. - if (!dry_run_) { + if (!config_.dry_run) { gain_applier_.ApplyGain(frame); } else { // Copy `frame` so that `ApplyGain()` is called (on a copy). 
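Reviewer note: the piecewise mapping implemented by `ComputeGainDb()` in the hunk above is easy to sanity-check in isolation. Below is a minimal standalone C++ sketch of that mapping; `kHeadroomDb` and `kMaxGainDb` are assumed illustrative values standing in for `config.headroom_db` and `config.max_gain_db`, not the production defaults.

#include <cstdio>

// Hypothetical stand-ins for `config.headroom_db` and `config.max_gain_db`.
constexpr float kHeadroomDb = 5.0f;
constexpr float kMaxGainDb = 30.0f;

// Same piecewise mapping as `ComputeGainDb()` in the hunk above.
float GainDb(float input_level_dbfs) {
  if (input_level_dbfs < -(kHeadroomDb + kMaxGainDb)) {
    return kMaxGainDb;  // Very low level: apply the maximum gain.
  }
  if (input_level_dbfs < -kHeadroomDb) {
    return -kHeadroomDb - input_level_dbfs;  // Boost peaks up to -headroom.
  }
  return 0.0f;  // Level already at or above -headroom: no boost.
}

int main() {
  // -40 dBFS is below -(5 + 30) dBFS, so the gain saturates at 30 dB.
  std::printf("%.1f dB\n", GainDb(-40.0f));  // 30.0
  // -20 dBFS is boosted to -5 dBFS, i.e., 15 dB of gain.
  std::printf("%.1f dB\n", GainDb(-20.0f));  // 15.0
  // -3 dBFS is already above -5 dBFS: no boost.
  std::printf("%.1f dB\n", GainDb(-3.0f));   // 0.0
  return 0;
}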
@@ -247,7 +254,8 @@ void AdaptiveDigitalGainApplier::Process(const FrameInfo& info, kHeadroomHistogramMax, kHeadroomHistogramMax - kHeadroomHistogramMin + 1); RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.Agc2.DigitalGainApplied", - last_gain_db_, 0, kMaxGainDb, kMaxGainDb + 1); + last_gain_db_, 0, kGainDbHistogramMax, + kGainDbHistogramMax + 1); RTC_LOG(LS_INFO) << "AGC2 adaptive digital" << " | speech_dbfs: " << info.speech_level_dbfs << " | noise_dbfs: " << info.noise_rms_dbfs diff --git a/modules/audio_processing/agc2/adaptive_digital_gain_applier.h b/modules/audio_processing/agc2/adaptive_digital_gain_applier.h index 8b58ea00b2..dc84c1e238 100644 --- a/modules/audio_processing/agc2/adaptive_digital_gain_applier.h +++ b/modules/audio_processing/agc2/adaptive_digital_gain_applier.h @@ -15,12 +15,13 @@ #include "modules/audio_processing/agc2/gain_applier.h" #include "modules/audio_processing/include/audio_frame_view.h" +#include "modules/audio_processing/include/audio_processing.h" namespace webrtc { class ApmDataDumper; -// TODO(bugs.webrtc.org): Split into `GainAdaptor` and `GainApplier`. +// TODO(bugs.webrtc.org/7494): Split into `GainAdaptor` and `GainApplier`. // Selects the target digital gain, decides when and how quickly to adapt to the // target and applies the current gain to 10 ms frames. class AdaptiveDigitalGainApplier { @@ -35,16 +36,11 @@ class AdaptiveDigitalGainApplier { float limiter_envelope_dbfs; // Envelope level from the limiter (dBFS). }; - // Ctor. `adjacent_speech_frames_threshold` indicates how many adjacent speech - // frames must be observed in order to consider the sequence as speech. - // `max_gain_change_db_per_second` limits the adaptation speed (uniformly - // operated across frames). `max_output_noise_level_dbfs` limits the output - // noise level. If `dry_run` is true, `Process()` will not modify the audio. 
- AdaptiveDigitalGainApplier(ApmDataDumper* apm_data_dumper, - int adjacent_speech_frames_threshold, - float max_gain_change_db_per_second, - float max_output_noise_level_dbfs, - bool dry_run); + AdaptiveDigitalGainApplier( + ApmDataDumper* apm_data_dumper, + const AudioProcessing::Config::GainController2::AdaptiveDigital& config, + int sample_rate_hz, + int num_channels); AdaptiveDigitalGainApplier(const AdaptiveDigitalGainApplier&) = delete; AdaptiveDigitalGainApplier& operator=(const AdaptiveDigitalGainApplier&) = delete; @@ -59,10 +55,8 @@ class AdaptiveDigitalGainApplier { ApmDataDumper* const apm_data_dumper_; GainApplier gain_applier_; - const int adjacent_speech_frames_threshold_; + const AudioProcessing::Config::GainController2::AdaptiveDigital config_; const float max_gain_change_db_per_10ms_; - const float max_output_noise_level_dbfs_; - const bool dry_run_; int calls_since_last_gain_log_; int frames_to_gain_increase_allowed_; diff --git a/modules/audio_processing/agc2/adaptive_digital_gain_applier_unittest.cc b/modules/audio_processing/agc2/adaptive_digital_gain_applier_unittest.cc index f4a23a92b9..ea7485f512 100644 --- a/modules/audio_processing/agc2/adaptive_digital_gain_applier_unittest.cc +++ b/modules/audio_processing/agc2/adaptive_digital_gain_applier_unittest.cc @@ -16,6 +16,7 @@ #include "common_audio/include/audio_util.h" #include "modules/audio_processing/agc2/agc2_common.h" #include "modules/audio_processing/agc2/vector_float_frame.h" +#include "modules/audio_processing/include/audio_processing.h" #include "modules/audio_processing/logging/apm_data_dumper.h" #include "rtc_base/gunit.h" @@ -31,59 +32,72 @@ constexpr float kMaxSpeechProbability = 1.0f; // Constants used in place of estimated noise levels. constexpr float kNoNoiseDbfs = kMinLevelDbfs; -constexpr float kWithNoiseDbfs = -20.f; +constexpr float kWithNoiseDbfs = -20.0f; -constexpr float kMaxGainChangePerSecondDb = 3.0f; -constexpr float kMaxGainChangePerFrameDb = - kMaxGainChangePerSecondDb * kFrameDurationMs / 1000.0f; -constexpr float kMaxOutputNoiseLevelDbfs = -50.0f; +// Number of additional frames to process in the tests to ensure that the tested +// adaptation processes have converged. +constexpr int kNumExtraFrames = 10; + +constexpr float GetMaxGainChangePerFrameDb( + float max_gain_change_db_per_second) { + return max_gain_change_db_per_second * kFrameDurationMs / 1000.0f; +} + +using AdaptiveDigitalConfig = + AudioProcessing::Config::GainController2::AdaptiveDigital; + +constexpr AdaptiveDigitalConfig kDefaultConfig{}; // Helper to create initialized `AdaptiveDigitalGainApplier` objects. struct GainApplierHelper { - GainApplierHelper() - : GainApplierHelper(/*adjacent_speech_frames_threshold=*/1) {} - explicit GainApplierHelper(int adjacent_speech_frames_threshold) + GainApplierHelper(const AdaptiveDigitalConfig& config, + int sample_rate_hz, + int num_channels) : apm_data_dumper(0), - gain_applier(std::make_unique( - &apm_data_dumper, - adjacent_speech_frames_threshold, - kMaxGainChangePerSecondDb, - kMaxOutputNoiseLevelDbfs, - /*dry_run=*/false)) {} + gain_applier( + std::make_unique(&apm_data_dumper, + config, + sample_rate_hz, + num_channels)) {} ApmDataDumper apm_data_dumper; std::unique_ptr gain_applier; }; -// Voice on, no noise, low limiter, confident level. 
-static_assert(std::is_trivially_destructible< - AdaptiveDigitalGainApplier::FrameInfo>::value, - ""); -constexpr AdaptiveDigitalGainApplier::FrameInfo kFrameInfo{ - /*speech_probability=*/kMaxSpeechProbability, - /*speech_level_dbfs=*/kInitialSpeechLevelEstimateDbfs, - /*speech_level_reliable=*/true, - /*noise_rms_dbfs=*/kNoNoiseDbfs, - /*headroom_db=*/kSaturationProtectorInitialHeadroomDb, - /*limiter_envelope_dbfs=*/-2.0f}; +// Returns a `FrameInfo` sample to simulate noiseless speech detected with +// maximum probability and with level, headroom and limiter envelope chosen +// so that the resulting gain equals the default initial adaptive digital gain +// i.e., no gain adaptation is expected. +AdaptiveDigitalGainApplier::FrameInfo GetFrameInfoToNotAdapt( + const AdaptiveDigitalConfig& config) { + AdaptiveDigitalGainApplier::FrameInfo info; + info.speech_probability = kMaxSpeechProbability; + info.speech_level_dbfs = -config.initial_gain_db - config.headroom_db; + info.speech_level_reliable = true; + info.noise_rms_dbfs = kNoNoiseDbfs; + info.headroom_db = config.headroom_db; + info.limiter_envelope_dbfs = -2.0f; + return info; +} TEST(GainController2AdaptiveGainApplier, GainApplierShouldNotCrash) { - GainApplierHelper helper; - helper.gain_applier->Initialize(/*sample_rate_hz=*/48000, kStereo); + GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/48000, kStereo); // Make one call with reasonable audio level values and settings. VectorFloatFrame fake_audio(kStereo, kFrameLen10ms48kHz, 10000.0f); - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; - info.speech_level_dbfs = -5.0f; - helper.gain_applier->Process(kFrameInfo, fake_audio.float_frame_view()); + helper.gain_applier->Process(GetFrameInfoToNotAdapt(kDefaultConfig), + fake_audio.float_frame_view()); } // Checks that the maximum allowed gain is applied. TEST(GainController2AdaptiveGainApplier, MaxGainApplied) { constexpr int kNumFramesToAdapt = - static_cast(kMaxGainDb / kMaxGainChangePerFrameDb) + 10; - - GainApplierHelper helper; - helper.gain_applier->Initialize(/*sample_rate_hz=*/8000, kMono); - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; + static_cast(kDefaultConfig.max_gain_db / + GetMaxGainChangePerFrameDb( + kDefaultConfig.max_gain_change_db_per_second)) + + kNumExtraFrames; + + GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/8000, kMono); + AdaptiveDigitalGainApplier::FrameInfo info = + GetFrameInfoToNotAdapt(kDefaultConfig); info.speech_level_dbfs = -60.0f; float applied_gain; for (int i = 0; i < kNumFramesToAdapt; ++i) { @@ -92,30 +106,32 @@ TEST(GainController2AdaptiveGainApplier, MaxGainApplied) { applied_gain = fake_audio.float_frame_view().channel(0)[0]; } const float applied_gain_db = 20.0f * std::log10f(applied_gain); - EXPECT_NEAR(applied_gain_db, kMaxGainDb, 0.1f); + EXPECT_NEAR(applied_gain_db, kDefaultConfig.max_gain_db, 0.1f); } TEST(GainController2AdaptiveGainApplier, GainDoesNotChangeFast) { - GainApplierHelper helper; - helper.gain_applier->Initialize(/*sample_rate_hz=*/8000, kMono); + GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/8000, kMono); constexpr float initial_level_dbfs = -25.0f; - // A few extra frames for safety. 
+ constexpr float kMaxGainChangeDbPerFrame = + GetMaxGainChangePerFrameDb(kDefaultConfig.max_gain_change_db_per_second); constexpr int kNumFramesToAdapt = - static_cast(initial_level_dbfs / kMaxGainChangePerFrameDb) + 10; + static_cast(initial_level_dbfs / kMaxGainChangeDbPerFrame) + + kNumExtraFrames; - const float kMaxChangePerFrameLinear = DbToRatio(kMaxGainChangePerFrameDb); + const float max_change_per_frame_linear = DbToRatio(kMaxGainChangeDbPerFrame); float last_gain_linear = 1.f; for (int i = 0; i < kNumFramesToAdapt; ++i) { SCOPED_TRACE(i); VectorFloatFrame fake_audio(kMono, kFrameLen10ms8kHz, 1.0f); - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; + AdaptiveDigitalGainApplier::FrameInfo info = + GetFrameInfoToNotAdapt(kDefaultConfig); info.speech_level_dbfs = initial_level_dbfs; helper.gain_applier->Process(info, fake_audio.float_frame_view()); float current_gain_linear = fake_audio.float_frame_view().channel(0)[0]; EXPECT_LE(std::abs(current_gain_linear - last_gain_linear), - kMaxChangePerFrameLinear); + max_change_per_frame_linear); last_gain_linear = current_gain_linear; } @@ -123,56 +139,59 @@ TEST(GainController2AdaptiveGainApplier, GainDoesNotChangeFast) { for (int i = 0; i < kNumFramesToAdapt; ++i) { SCOPED_TRACE(i); VectorFloatFrame fake_audio(kMono, kFrameLen10ms8kHz, 1.0f); - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; + AdaptiveDigitalGainApplier::FrameInfo info = + GetFrameInfoToNotAdapt(kDefaultConfig); info.speech_level_dbfs = 0.f; helper.gain_applier->Process(info, fake_audio.float_frame_view()); float current_gain_linear = fake_audio.float_frame_view().channel(0)[0]; EXPECT_LE(std::abs(current_gain_linear - last_gain_linear), - kMaxChangePerFrameLinear); + max_change_per_frame_linear); last_gain_linear = current_gain_linear; } } TEST(GainController2AdaptiveGainApplier, GainIsRampedInAFrame) { - GainApplierHelper helper; - helper.gain_applier->Initialize(/*sample_rate_hz=*/48000, kMono); + GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/48000, kMono); constexpr float initial_level_dbfs = -25.0f; VectorFloatFrame fake_audio(kMono, kFrameLen10ms48kHz, 1.0f); - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; + AdaptiveDigitalGainApplier::FrameInfo info = + GetFrameInfoToNotAdapt(kDefaultConfig); info.speech_level_dbfs = initial_level_dbfs; helper.gain_applier->Process(info, fake_audio.float_frame_view()); float maximal_difference = 0.0f; - float current_value = 1.0f * DbToRatio(kInitialAdaptiveDigitalGainDb); + float current_value = 1.0f * DbToRatio(kDefaultConfig.initial_gain_db); for (const auto& x : fake_audio.float_frame_view().channel(0)) { const float difference = std::abs(x - current_value); maximal_difference = std::max(maximal_difference, difference); current_value = x; } - const float kMaxChangePerFrameLinear = DbToRatio(kMaxGainChangePerFrameDb); - const float kMaxChangePerSample = - kMaxChangePerFrameLinear / kFrameLen10ms48kHz; + const float max_change_per_frame_linear = DbToRatio( + GetMaxGainChangePerFrameDb(kDefaultConfig.max_gain_change_db_per_second)); + const float max_change_per_sample = + max_change_per_frame_linear / kFrameLen10ms48kHz; - EXPECT_LE(maximal_difference, kMaxChangePerSample); + EXPECT_LE(maximal_difference, max_change_per_sample); } TEST(GainController2AdaptiveGainApplier, NoiseLimitsGain) { - GainApplierHelper helper; - helper.gain_applier->Initialize(/*sample_rate_hz=*/48000, kMono); + GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/48000, kMono); constexpr float 
initial_level_dbfs = -25.0f; constexpr int num_initial_frames = - kInitialAdaptiveDigitalGainDb / kMaxGainChangePerFrameDb; + kDefaultConfig.initial_gain_db / + GetMaxGainChangePerFrameDb(kDefaultConfig.max_gain_change_db_per_second); constexpr int num_frames = 50; - ASSERT_GT(kWithNoiseDbfs, kMaxOutputNoiseLevelDbfs) + ASSERT_GT(kWithNoiseDbfs, kDefaultConfig.max_output_noise_level_dbfs) << "kWithNoiseDbfs is too low"; for (int i = 0; i < num_initial_frames + num_frames; ++i) { VectorFloatFrame fake_audio(kMono, kFrameLen10ms48kHz, 1.0f); - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; + AdaptiveDigitalGainApplier::FrameInfo info = + GetFrameInfoToNotAdapt(kDefaultConfig); info.speech_level_dbfs = initial_level_dbfs; info.noise_rms_dbfs = kWithNoiseDbfs; helper.gain_applier->Process(info, fake_audio.float_frame_view()); @@ -189,31 +208,32 @@ TEST(GainController2AdaptiveGainApplier, NoiseLimitsGain) { } TEST(GainController2GainApplier, CanHandlePositiveSpeechLevels) { - GainApplierHelper helper; - helper.gain_applier->Initialize(/*sample_rate_hz=*/48000, kStereo); + GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/48000, kStereo); // Make one call with positive audio level values and settings. VectorFloatFrame fake_audio(kStereo, kFrameLen10ms48kHz, 10000.0f); - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; + AdaptiveDigitalGainApplier::FrameInfo info = + GetFrameInfoToNotAdapt(kDefaultConfig); info.speech_level_dbfs = 5.0f; helper.gain_applier->Process(info, fake_audio.float_frame_view()); } TEST(GainController2GainApplier, AudioLevelLimitsGain) { - GainApplierHelper helper; - helper.gain_applier->Initialize(/*sample_rate_hz=*/48000, kMono); + GainApplierHelper helper(kDefaultConfig, /*sample_rate_hz=*/48000, kMono); constexpr float initial_level_dbfs = -25.0f; constexpr int num_initial_frames = - kInitialAdaptiveDigitalGainDb / kMaxGainChangePerFrameDb; + kDefaultConfig.initial_gain_db / + GetMaxGainChangePerFrameDb(kDefaultConfig.max_gain_change_db_per_second); constexpr int num_frames = 50; - ASSERT_GT(kWithNoiseDbfs, kMaxOutputNoiseLevelDbfs) + ASSERT_GT(kWithNoiseDbfs, kDefaultConfig.max_output_noise_level_dbfs) << "kWithNoiseDbfs is too low"; for (int i = 0; i < num_initial_frames + num_frames; ++i) { VectorFloatFrame fake_audio(kMono, kFrameLen10ms48kHz, 1.0f); - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; + AdaptiveDigitalGainApplier::FrameInfo info = + GetFrameInfoToNotAdapt(kDefaultConfig); info.speech_level_dbfs = initial_level_dbfs; info.limiter_envelope_dbfs = 1.0f; info.speech_level_reliable = false; @@ -232,46 +252,54 @@ TEST(GainController2GainApplier, AudioLevelLimitsGain) { class AdaptiveDigitalGainApplierTest : public ::testing::TestWithParam { protected: - int AdjacentSpeechFramesThreshold() const { return GetParam(); } + int adjacent_speech_frames_threshold() const { return GetParam(); } }; TEST_P(AdaptiveDigitalGainApplierTest, DoNotIncreaseGainWithTooFewSpeechFrames) { - const int adjacent_speech_frames_threshold = AdjacentSpeechFramesThreshold(); - GainApplierHelper helper(adjacent_speech_frames_threshold); - helper.gain_applier->Initialize(/*sample_rate_hz=*/48000, kMono); + AdaptiveDigitalConfig config; + config.adjacent_speech_frames_threshold = adjacent_speech_frames_threshold(); + GainApplierHelper helper(config, /*sample_rate_hz=*/48000, kMono); + + // Lower the speech level so that the target gain will be increased. 
+ AdaptiveDigitalGainApplier::FrameInfo info = GetFrameInfoToNotAdapt(config); + info.speech_level_dbfs -= 12.0f; float prev_gain = 0.0f; - for (int i = 0; i < adjacent_speech_frames_threshold; ++i) { + for (int i = 0; i < config.adjacent_speech_frames_threshold; ++i) { SCOPED_TRACE(i); VectorFloatFrame audio(kMono, kFrameLen10ms48kHz, 1.0f); - helper.gain_applier->Process(kFrameInfo, audio.float_frame_view()); + helper.gain_applier->Process(info, audio.float_frame_view()); const float gain = audio.float_frame_view().channel(0)[0]; if (i > 0) { - EXPECT_EQ(prev_gain, gain); // No gain increase. + EXPECT_EQ(prev_gain, gain); // No gain increase applied. } prev_gain = gain; } } TEST_P(AdaptiveDigitalGainApplierTest, IncreaseGainWithEnoughSpeechFrames) { - const int adjacent_speech_frames_threshold = AdjacentSpeechFramesThreshold(); - GainApplierHelper helper(adjacent_speech_frames_threshold); - helper.gain_applier->Initialize(/*sample_rate_hz=*/48000, kMono); + AdaptiveDigitalConfig config; + config.adjacent_speech_frames_threshold = adjacent_speech_frames_threshold(); + GainApplierHelper helper(config, /*sample_rate_hz=*/48000, kMono); + + // Lower the speech level so that the target gain will be increased. + AdaptiveDigitalGainApplier::FrameInfo info = GetFrameInfoToNotAdapt(config); + info.speech_level_dbfs -= 12.0f; float prev_gain = 0.0f; - for (int i = 0; i < adjacent_speech_frames_threshold; ++i) { + for (int i = 0; i < config.adjacent_speech_frames_threshold; ++i) { SCOPED_TRACE(i); VectorFloatFrame audio(kMono, kFrameLen10ms48kHz, 1.0f); - helper.gain_applier->Process(kFrameInfo, audio.float_frame_view()); + helper.gain_applier->Process(info, audio.float_frame_view()); prev_gain = audio.float_frame_view().channel(0)[0]; } // Process one more speech frame. VectorFloatFrame audio(kMono, kFrameLen10ms48kHz, 1.0f); - helper.gain_applier->Process(kFrameInfo, audio.float_frame_view()); + helper.gain_applier->Process(info, audio.float_frame_view()); - // The gain has increased. + // An increased gain has been applied. EXPECT_GT(audio.float_frame_view().channel(0)[0], prev_gain); } @@ -281,63 +309,62 @@ INSTANTIATE_TEST_SUITE_P(GainController2, // Checks that the input is never modified when running in dry run mode. TEST(GainController2GainApplier, DryRunDoesNotChangeInput) { - ApmDataDumper apm_data_dumper(0); - AdaptiveDigitalGainApplier gain_applier( - &apm_data_dumper, /*adjacent_speech_frames_threshold=*/1, - kMaxGainChangePerSecondDb, kMaxOutputNoiseLevelDbfs, /*dry_run=*/true); + AdaptiveDigitalConfig config; + config.dry_run = true; + GainApplierHelper helper(config, /*sample_rate_hz=*/8000, kMono); + // Simulate an input signal with log speech level. - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; + AdaptiveDigitalGainApplier::FrameInfo info = GetFrameInfoToNotAdapt(config); info.speech_level_dbfs = -60.0f; - // Allow enough time to reach the maximum gain. - constexpr int kNumFramesToAdapt = - static_cast(kMaxGainDb / kMaxGainChangePerFrameDb) + 10; + const int num_frames_to_adapt = + static_cast( + config.max_gain_db / + GetMaxGainChangePerFrameDb(config.max_gain_change_db_per_second)) + + kNumExtraFrames; constexpr float kPcmSamples = 123.456f; // Run the gain applier and check that the PCM samples are not modified. 
- gain_applier.Initialize(/*sample_rate_hz=*/8000, kMono); - for (int i = 0; i < kNumFramesToAdapt; ++i) { + for (int i = 0; i < num_frames_to_adapt; ++i) { SCOPED_TRACE(i); VectorFloatFrame fake_audio(kMono, kFrameLen10ms8kHz, kPcmSamples); - gain_applier.Process(info, fake_audio.float_frame_view()); + helper.gain_applier->Process(info, fake_audio.float_frame_view()); EXPECT_FLOAT_EQ(fake_audio.float_frame_view().channel(0)[0], kPcmSamples); } } // Checks that no sample is modified before and after the sample rate changes. TEST(GainController2GainApplier, DryRunHandlesSampleRateChange) { - ApmDataDumper apm_data_dumper(0); - AdaptiveDigitalGainApplier gain_applier( - &apm_data_dumper, /*adjacent_speech_frames_threshold=*/1, - kMaxGainChangePerSecondDb, kMaxOutputNoiseLevelDbfs, /*dry_run=*/true); - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; + AdaptiveDigitalConfig config; + config.dry_run = true; + GainApplierHelper helper(config, /*sample_rate_hz=*/8000, kMono); + + AdaptiveDigitalGainApplier::FrameInfo info = GetFrameInfoToNotAdapt(config); info.speech_level_dbfs = -60.0f; constexpr float kPcmSamples = 123.456f; VectorFloatFrame fake_audio_8k(kMono, kFrameLen10ms8kHz, kPcmSamples); - gain_applier.Initialize(/*sample_rate_hz=*/8000, kMono); - gain_applier.Process(info, fake_audio_8k.float_frame_view()); + helper.gain_applier->Process(info, fake_audio_8k.float_frame_view()); EXPECT_FLOAT_EQ(fake_audio_8k.float_frame_view().channel(0)[0], kPcmSamples); - gain_applier.Initialize(/*sample_rate_hz=*/48000, kMono); + helper.gain_applier->Initialize(/*sample_rate_hz=*/48000, kMono); VectorFloatFrame fake_audio_48k(kMono, kFrameLen10ms48kHz, kPcmSamples); - gain_applier.Process(info, fake_audio_48k.float_frame_view()); + helper.gain_applier->Process(info, fake_audio_48k.float_frame_view()); EXPECT_FLOAT_EQ(fake_audio_48k.float_frame_view().channel(0)[0], kPcmSamples); } // Checks that no sample is modified before and after the number of channels // changes. 
TEST(GainController2GainApplier, DryRunHandlesNumChannelsChange) { - ApmDataDumper apm_data_dumper(0); - AdaptiveDigitalGainApplier gain_applier( - &apm_data_dumper, /*adjacent_speech_frames_threshold=*/1, - kMaxGainChangePerSecondDb, kMaxOutputNoiseLevelDbfs, /*dry_run=*/true); - AdaptiveDigitalGainApplier::FrameInfo info = kFrameInfo; + AdaptiveDigitalConfig config; + config.dry_run = true; + GainApplierHelper helper(config, /*sample_rate_hz=*/8000, kMono); + + AdaptiveDigitalGainApplier::FrameInfo info = GetFrameInfoToNotAdapt(config); info.speech_level_dbfs = -60.0f; constexpr float kPcmSamples = 123.456f; VectorFloatFrame fake_audio_8k(kMono, kFrameLen10ms8kHz, kPcmSamples); - gain_applier.Initialize(/*sample_rate_hz=*/8000, kMono); - gain_applier.Process(info, fake_audio_8k.float_frame_view()); + helper.gain_applier->Process(info, fake_audio_8k.float_frame_view()); EXPECT_FLOAT_EQ(fake_audio_8k.float_frame_view().channel(0)[0], kPcmSamples); VectorFloatFrame fake_audio_48k(kStereo, kFrameLen10ms8kHz, kPcmSamples); - gain_applier.Initialize(/*sample_rate_hz=*/8000, kStereo); - gain_applier.Process(info, fake_audio_48k.float_frame_view()); + helper.gain_applier->Initialize(/*sample_rate_hz=*/8000, kStereo); + helper.gain_applier->Process(info, fake_audio_48k.float_frame_view()); EXPECT_FLOAT_EQ(fake_audio_48k.float_frame_view().channel(0)[0], kPcmSamples); EXPECT_FLOAT_EQ(fake_audio_48k.float_frame_view().channel(1)[0], kPcmSamples); } diff --git a/modules/audio_processing/agc2/adaptive_digital_gain_controller.cc b/modules/audio_processing/agc2/adaptive_digital_gain_controller.cc new file mode 100644 index 0000000000..381e454868 --- /dev/null +++ b/modules/audio_processing/agc2/adaptive_digital_gain_controller.cc @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/audio_processing/agc2/adaptive_digital_gain_controller.h" + +#include + +#include "common_audio/include/audio_util.h" +#include "modules/audio_processing/agc2/vad_wrapper.h" +#include "modules/audio_processing/logging/apm_data_dumper.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { +namespace { + +// Peak and RMS audio levels in dBFS. +struct AudioLevels { + float peak_dbfs; + float rms_dbfs; +}; + +// Computes the audio levels for the first channel in `frame`. 
+AudioLevels ComputeAudioLevels(AudioFrameView frame) { + float peak = 0.0f; + float rms = 0.0f; + for (const auto& x : frame.channel(0)) { + peak = std::max(std::fabs(x), peak); + rms += x * x; + } + return {FloatS16ToDbfs(peak), + FloatS16ToDbfs(std::sqrt(rms / frame.samples_per_channel()))}; +} + +} // namespace + +AdaptiveDigitalGainController::AdaptiveDigitalGainController( + ApmDataDumper* apm_data_dumper, + const AudioProcessing::Config::GainController2::AdaptiveDigital& config, + int sample_rate_hz, + int num_channels) + : speech_level_estimator_(apm_data_dumper, config), + gain_controller_(apm_data_dumper, config, sample_rate_hz, num_channels), + apm_data_dumper_(apm_data_dumper), + noise_level_estimator_(CreateNoiseFloorEstimator(apm_data_dumper)), + saturation_protector_( + CreateSaturationProtector(kSaturationProtectorInitialHeadroomDb, + config.adjacent_speech_frames_threshold, + apm_data_dumper)) { + RTC_DCHECK(apm_data_dumper); + RTC_DCHECK(noise_level_estimator_); + RTC_DCHECK(saturation_protector_); +} + +AdaptiveDigitalGainController::~AdaptiveDigitalGainController() = default; + +void AdaptiveDigitalGainController::Initialize(int sample_rate_hz, + int num_channels) { + gain_controller_.Initialize(sample_rate_hz, num_channels); +} + +void AdaptiveDigitalGainController::Process(AudioFrameView frame, + float speech_probability, + float limiter_envelope) { + AudioLevels levels = ComputeAudioLevels(frame); + apm_data_dumper_->DumpRaw("agc2_input_rms_dbfs", levels.rms_dbfs); + apm_data_dumper_->DumpRaw("agc2_input_peak_dbfs", levels.peak_dbfs); + + AdaptiveDigitalGainApplier::FrameInfo info; + + info.speech_probability = speech_probability; + + speech_level_estimator_.Update(levels.rms_dbfs, levels.peak_dbfs, + info.speech_probability); + info.speech_level_dbfs = speech_level_estimator_.level_dbfs(); + info.speech_level_reliable = speech_level_estimator_.IsConfident(); + apm_data_dumper_->DumpRaw("agc2_speech_level_dbfs", info.speech_level_dbfs); + apm_data_dumper_->DumpRaw("agc2_speech_level_reliable", + info.speech_level_reliable); + + info.noise_rms_dbfs = noise_level_estimator_->Analyze(frame); + apm_data_dumper_->DumpRaw("agc2_noise_rms_dbfs", info.noise_rms_dbfs); + + saturation_protector_->Analyze(info.speech_probability, levels.peak_dbfs, + info.speech_level_dbfs); + info.headroom_db = saturation_protector_->HeadroomDb(); + apm_data_dumper_->DumpRaw("agc2_headroom_db", info.headroom_db); + + info.limiter_envelope_dbfs = FloatS16ToDbfs(limiter_envelope); + apm_data_dumper_->DumpRaw("agc2_limiter_envelope_dbfs", + info.limiter_envelope_dbfs); + + gain_controller_.Process(info, frame); +} + +void AdaptiveDigitalGainController::HandleInputGainChange() { + speech_level_estimator_.Reset(); + saturation_protector_->Reset(); +} + +} // namespace webrtc diff --git a/modules/audio_processing/agc2/adaptive_agc.h b/modules/audio_processing/agc2/adaptive_digital_gain_controller.h similarity index 57% rename from modules/audio_processing/agc2/adaptive_agc.h rename to modules/audio_processing/agc2/adaptive_digital_gain_controller.h index 43c7787e36..75ea44591e 100644 --- a/modules/audio_processing/agc2/adaptive_agc.h +++ b/modules/audio_processing/agc2/adaptive_digital_gain_controller.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#ifndef MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_AGC_H_ -#define MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_AGC_H_ +#ifndef MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_DIGITAL_GAIN_CONTROLLER_H_ +#define MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_DIGITAL_GAIN_CONTROLLER_H_ #include @@ -17,37 +17,41 @@ #include "modules/audio_processing/agc2/adaptive_mode_level_estimator.h" #include "modules/audio_processing/agc2/noise_level_estimator.h" #include "modules/audio_processing/agc2/saturation_protector.h" -#include "modules/audio_processing/agc2/vad_with_level.h" #include "modules/audio_processing/include/audio_frame_view.h" #include "modules/audio_processing/include/audio_processing.h" namespace webrtc { class ApmDataDumper; -// Adaptive digital gain controller. -// TODO(crbug.com/webrtc/7494): Rename to `AdaptiveDigitalGainController`. -class AdaptiveAgc { +// Gain controller that adapts and applies a variable digital gain to meet the +// target level, which is determined by the given configuration. +class AdaptiveDigitalGainController { public: - AdaptiveAgc( + AdaptiveDigitalGainController( ApmDataDumper* apm_data_dumper, - const AudioProcessing::Config::GainController2::AdaptiveDigital& config); - ~AdaptiveAgc(); - + const AudioProcessing::Config::GainController2::AdaptiveDigital& config, + int sample_rate_hz, + int num_channels); + AdaptiveDigitalGainController(const AdaptiveDigitalGainController&) = delete; + AdaptiveDigitalGainController& operator=( + const AdaptiveDigitalGainController&) = delete; + ~AdaptiveDigitalGainController(); + + // Detects and handles changes of sample rate and/or number of channels. void Initialize(int sample_rate_hz, int num_channels); - // TODO(crbug.com/webrtc/7494): Add `SetLimiterEnvelope()`. - - // Analyzes `frame` and applies a digital adaptive gain to it. Takes into - // account the envelope measured by the limiter. - // TODO(crbug.com/webrtc/7494): Remove `limiter_envelope`. - void Process(AudioFrameView frame, float limiter_envelope); + // Analyzes `frame`, adapts the current digital gain and applies it to + // `frame`. + // TODO(bugs.webrtc.org/7494): Remove `limiter_envelope`. + void Process(AudioFrameView frame, + float speech_probability, + float limiter_envelope); // Handles a gain change applied to the input signal (e.g., analog gain). void HandleInputGainChange(); private: AdaptiveModeLevelEstimator speech_level_estimator_; - VadLevelAnalyzer vad_; AdaptiveDigitalGainApplier gain_controller_; ApmDataDumper* const apm_data_dumper_; std::unique_ptr noise_level_estimator_; @@ -56,4 +60,4 @@ class AdaptiveAgc { } // namespace webrtc -#endif // MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_AGC_H_ +#endif // MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_DIGITAL_GAIN_CONTROLLER_H_ diff --git a/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc b/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc index 507aa12cb4..fe021fec05 100644 --- a/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc +++ b/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc @@ -19,11 +19,15 @@ namespace webrtc { namespace { -using LevelEstimatorType = - AudioProcessing::Config::GainController2::LevelEstimator; - float ClampLevelEstimateDbfs(float level_estimate_dbfs) { - return rtc::SafeClamp(level_estimate_dbfs, -90.f, 30.f); + return rtc::SafeClamp(level_estimate_dbfs, -90.0f, 30.0f); +} + +// Returns the initial speech level estimate needed to apply the initial gain. 
+float GetInitialSpeechLevelEstimateDbfs( + const AudioProcessing::Config::GainController2::AdaptiveDigital& config) { + return ClampLevelEstimateDbfs(-kSaturationProtectorInitialHeadroomDb - + config.initial_gain_db - config.headroom_db); } } // namespace @@ -40,32 +44,29 @@ float AdaptiveModeLevelEstimator::LevelEstimatorState::Ratio::GetRatio() const { return numerator / denominator; } -AdaptiveModeLevelEstimator::AdaptiveModeLevelEstimator( - ApmDataDumper* apm_data_dumper) - : AdaptiveModeLevelEstimator( - apm_data_dumper, - kDefaultLevelEstimatorAdjacentSpeechFramesThreshold) {} - AdaptiveModeLevelEstimator::AdaptiveModeLevelEstimator( ApmDataDumper* apm_data_dumper, - int adjacent_speech_frames_threshold) + const AudioProcessing::Config::GainController2::AdaptiveDigital& config) : apm_data_dumper_(apm_data_dumper), - adjacent_speech_frames_threshold_(adjacent_speech_frames_threshold), - level_dbfs_(ClampLevelEstimateDbfs(kInitialSpeechLevelEstimateDbfs)) { + initial_speech_level_dbfs_(GetInitialSpeechLevelEstimateDbfs(config)), + adjacent_speech_frames_threshold_( + config.adjacent_speech_frames_threshold), + level_dbfs_(initial_speech_level_dbfs_) { RTC_DCHECK(apm_data_dumper_); RTC_DCHECK_GE(adjacent_speech_frames_threshold_, 1); Reset(); } -void AdaptiveModeLevelEstimator::Update( - const VadLevelAnalyzer::Result& vad_level) { - RTC_DCHECK_GT(vad_level.rms_dbfs, -150.f); - RTC_DCHECK_LT(vad_level.rms_dbfs, 50.f); - RTC_DCHECK_GT(vad_level.peak_dbfs, -150.f); - RTC_DCHECK_LT(vad_level.peak_dbfs, 50.f); - RTC_DCHECK_GE(vad_level.speech_probability, 0.f); - RTC_DCHECK_LE(vad_level.speech_probability, 1.f); - if (vad_level.speech_probability < kVadConfidenceThreshold) { +void AdaptiveModeLevelEstimator::Update(float rms_dbfs, + float peak_dbfs, + float speech_probability) { + RTC_DCHECK_GT(rms_dbfs, -150.0f); + RTC_DCHECK_LT(rms_dbfs, 50.0f); + RTC_DCHECK_GT(peak_dbfs, -150.0f); + RTC_DCHECK_LT(peak_dbfs, 50.0f); + RTC_DCHECK_GE(speech_probability, 0.0f); + RTC_DCHECK_LE(speech_probability, 1.0f); + if (speech_probability < kVadConfidenceThreshold) { // Not a speech frame. if (adjacent_speech_frames_threshold_ > 1) { // When two or more adjacent speech frames are required in order to update @@ -93,14 +94,14 @@ void AdaptiveModeLevelEstimator::Update( preliminary_state_.time_to_confidence_ms -= kFrameDurationMs; } // Weighted average of levels with speech probability as weight. - RTC_DCHECK_GT(vad_level.speech_probability, 0.f); - const float leak_factor = buffer_is_full ? kLevelEstimatorLeakFactor : 1.f; + RTC_DCHECK_GT(speech_probability, 0.0f); + const float leak_factor = buffer_is_full ? 
kLevelEstimatorLeakFactor : 1.0f; preliminary_state_.level_dbfs.numerator = preliminary_state_.level_dbfs.numerator * leak_factor + - vad_level.rms_dbfs * vad_level.speech_probability; + rms_dbfs * speech_probability; preliminary_state_.level_dbfs.denominator = preliminary_state_.level_dbfs.denominator * leak_factor + - vad_level.speech_probability; + speech_probability; const float level_dbfs = preliminary_state_.level_dbfs.GetRatio(); @@ -131,14 +132,14 @@ bool AdaptiveModeLevelEstimator::IsConfident() const { void AdaptiveModeLevelEstimator::Reset() { ResetLevelEstimatorState(preliminary_state_); ResetLevelEstimatorState(reliable_state_); - level_dbfs_ = ClampLevelEstimateDbfs(kInitialSpeechLevelEstimateDbfs); + level_dbfs_ = initial_speech_level_dbfs_; num_adjacent_speech_frames_ = 0; } void AdaptiveModeLevelEstimator::ResetLevelEstimatorState( LevelEstimatorState& state) const { state.time_to_confidence_ms = kLevelEstimatorTimeToConfidenceMs; - state.level_dbfs.numerator = kInitialSpeechLevelEstimateDbfs; + state.level_dbfs.numerator = initial_speech_level_dbfs_; state.level_dbfs.denominator = 1.0f; } diff --git a/modules/audio_processing/agc2/adaptive_mode_level_estimator.h b/modules/audio_processing/agc2/adaptive_mode_level_estimator.h index 6d44938587..989c8c3572 100644 --- a/modules/audio_processing/agc2/adaptive_mode_level_estimator.h +++ b/modules/audio_processing/agc2/adaptive_mode_level_estimator.h @@ -12,10 +12,11 @@ #define MODULES_AUDIO_PROCESSING_AGC2_ADAPTIVE_MODE_LEVEL_ESTIMATOR_H_ #include + #include #include "modules/audio_processing/agc2/agc2_common.h" -#include "modules/audio_processing/agc2/vad_with_level.h" +#include "modules/audio_processing/agc2/vad_wrapper.h" #include "modules/audio_processing/include/audio_processing.h" namespace webrtc { @@ -24,15 +25,15 @@ class ApmDataDumper; // Level estimator for the digital adaptive gain controller. class AdaptiveModeLevelEstimator { public: - explicit AdaptiveModeLevelEstimator(ApmDataDumper* apm_data_dumper); + AdaptiveModeLevelEstimator( + ApmDataDumper* apm_data_dumper, + const AudioProcessing::Config::GainController2::AdaptiveDigital& config); AdaptiveModeLevelEstimator(const AdaptiveModeLevelEstimator&) = delete; AdaptiveModeLevelEstimator& operator=(const AdaptiveModeLevelEstimator&) = delete; - AdaptiveModeLevelEstimator(ApmDataDumper* apm_data_dumper, - int adjacent_speech_frames_threshold); // Updates the level estimation. - void Update(const VadLevelAnalyzer::Result& vad_data); + void Update(float rms_dbfs, float peak_dbfs, float speech_probability); // Returns the estimated speech plus noise level. float level_dbfs() const { return level_dbfs_; } // Returns true if the estimator is confident on its current estimate. @@ -47,14 +48,13 @@ class AdaptiveModeLevelEstimator { inline bool operator!=(const LevelEstimatorState& s) const { return !(*this == s); } + // TODO(bugs.webrtc.org/7494): Remove `time_to_confidence_ms` if redundant. + int time_to_confidence_ms; struct Ratio { float numerator; float denominator; float GetRatio() const; - }; - // TODO(crbug.com/webrtc/7494): Remove time_to_confidence_ms if redundant. 
- int time_to_confidence_ms; - Ratio level_dbfs; + } level_dbfs; }; static_assert(std::is_trivially_copyable::value, ""); @@ -64,6 +64,7 @@ class AdaptiveModeLevelEstimator { ApmDataDumper* const apm_data_dumper_; + const float initial_speech_level_dbfs_; const int adjacent_speech_frames_threshold_; LevelEstimatorState preliminary_state_; LevelEstimatorState reliable_state_; diff --git a/modules/audio_processing/agc2/adaptive_mode_level_estimator_unittest.cc b/modules/audio_processing/agc2/adaptive_mode_level_estimator_unittest.cc index c55950ac29..684fca188a 100644 --- a/modules/audio_processing/agc2/adaptive_mode_level_estimator_unittest.cc +++ b/modules/audio_processing/agc2/adaptive_mode_level_estimator_unittest.cc @@ -13,73 +13,79 @@ #include #include "modules/audio_processing/agc2/agc2_common.h" +#include "modules/audio_processing/include/audio_processing.h" #include "modules/audio_processing/logging/apm_data_dumper.h" #include "rtc_base/gunit.h" namespace webrtc { namespace { +using AdaptiveDigitalConfig = + AudioProcessing::Config::GainController2::AdaptiveDigital; + // Number of speech frames that the level estimator must observe in order to // become confident about the estimated level. constexpr int kNumFramesToConfidence = kLevelEstimatorTimeToConfidenceMs / kFrameDurationMs; static_assert(kNumFramesToConfidence > 0, ""); -// Fake levels and speech probabilities used in the tests. -static_assert(kInitialSpeechLevelEstimateDbfs < 0.0f, ""); -constexpr float kVadLevelRms = kInitialSpeechLevelEstimateDbfs / 2.0f; -constexpr float kVadLevelPeak = kInitialSpeechLevelEstimateDbfs / 3.0f; -static_assert(kVadLevelRms < kVadLevelPeak, ""); -static_assert(kVadLevelRms > kInitialSpeechLevelEstimateDbfs, ""); -static_assert(kVadLevelRms - kInitialSpeechLevelEstimateDbfs > 5.0f, - "Adjust `kVadLevelRms` so that the difference from the initial " - "level is wide enough for the tests."); - -constexpr VadLevelAnalyzer::Result kVadDataSpeech{/*speech_probability=*/1.0f, - kVadLevelRms, kVadLevelPeak}; -constexpr VadLevelAnalyzer::Result kVadDataNonSpeech{ - /*speech_probability=*/kVadConfidenceThreshold / 2.0f, kVadLevelRms, - kVadLevelPeak}; - -constexpr float kMinSpeechProbability = 0.0f; -constexpr float kMaxSpeechProbability = 1.0f; - constexpr float kConvergenceSpeedTestsLevelTolerance = 0.5f; -// Provides the `vad_level` value `num_iterations` times to `level_estimator`. +// Provides the given speech level and probability `num_iterations` times to +// `level_estimator`. void RunOnConstantLevel(int num_iterations, - const VadLevelAnalyzer::Result& vad_level, + float rms_dbfs, + float peak_dbfs, + float speech_probability, AdaptiveModeLevelEstimator& level_estimator) { for (int i = 0; i < num_iterations; ++i) { - level_estimator.Update(vad_level); + level_estimator.Update(rms_dbfs, peak_dbfs, speech_probability); } } +constexpr AdaptiveDigitalConfig GetAdaptiveDigitalConfig( int adjacent_speech_frames_threshold) { + AdaptiveDigitalConfig config; + config.adjacent_speech_frames_threshold = adjacent_speech_frames_threshold; + return config; +} + +constexpr float kNoSpeechProbability = 0.0f; +constexpr float kLowSpeechProbability = kVadConfidenceThreshold / 2.0f; +constexpr float kMaxSpeechProbability = 1.0f; + // Level estimator with data dumper. 
struct TestLevelEstimator { - TestLevelEstimator() + explicit TestLevelEstimator(int adjacent_speech_frames_threshold) : data_dumper(0), estimator(std::make_unique( &data_dumper, - /*adjacent_speech_frames_threshold=*/1)) {} + GetAdaptiveDigitalConfig(adjacent_speech_frames_threshold))), + initial_speech_level_dbfs(estimator->level_dbfs()), + level_rms_dbfs(initial_speech_level_dbfs / 2.0f), + level_peak_dbfs(initial_speech_level_dbfs / 3.0f) { + RTC_DCHECK_LT(level_rms_dbfs, level_peak_dbfs); + RTC_DCHECK_LT(initial_speech_level_dbfs, level_rms_dbfs); + RTC_DCHECK_GT(level_rms_dbfs - initial_speech_level_dbfs, 5.0f) + << "Adjust `level_rms_dbfs` so that the difference from the initial " + "level is wide enough for the tests"; + } ApmDataDumper data_dumper; std::unique_ptr estimator; + const float initial_speech_level_dbfs; + const float level_rms_dbfs; + const float level_peak_dbfs; }; -// Checks the initially estimated level. -TEST(GainController2AdaptiveModeLevelEstimator, CheckInitialEstimate) { - TestLevelEstimator level_estimator; - EXPECT_FLOAT_EQ(level_estimator.estimator->level_dbfs(), - kInitialSpeechLevelEstimateDbfs); -} - // Checks that the level estimator converges to a constant input speech level. TEST(GainController2AdaptiveModeLevelEstimator, LevelStabilizes) { - TestLevelEstimator level_estimator; - RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence, kVadDataSpeech, + TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1); + RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence, + level_estimator.level_rms_dbfs, + level_estimator.level_peak_dbfs, kMaxSpeechProbability, *level_estimator.estimator); const float estimated_level_dbfs = level_estimator.estimator->level_dbfs(); - RunOnConstantLevel(/*num_iterations=*/1, kVadDataSpeech, + RunOnConstantLevel(/*num_iterations=*/1, level_estimator.level_rms_dbfs, + level_estimator.level_peak_dbfs, kMaxSpeechProbability, *level_estimator.estimator); EXPECT_NEAR(level_estimator.estimator->level_dbfs(), estimated_level_dbfs, 0.1f); @@ -88,17 +94,21 @@ TEST(GainController2AdaptiveModeLevelEstimator, LevelStabilizes) { // Checks that the level controller does not become confident when too few // speech frames are observed. TEST(GainController2AdaptiveModeLevelEstimator, IsNotConfident) { - TestLevelEstimator level_estimator; + TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1); RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence / 2, - kVadDataSpeech, *level_estimator.estimator); + level_estimator.level_rms_dbfs, + level_estimator.level_peak_dbfs, kMaxSpeechProbability, + *level_estimator.estimator); EXPECT_FALSE(level_estimator.estimator->IsConfident()); } // Checks that the level controller becomes confident when enough speech frames // are observed. TEST(GainController2AdaptiveModeLevelEstimator, IsConfident) { - TestLevelEstimator level_estimator; - RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence, kVadDataSpeech, + TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1); + RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence, + level_estimator.level_rms_dbfs, + level_estimator.level_peak_dbfs, kMaxSpeechProbability, *level_estimator.estimator); EXPECT_TRUE(level_estimator.estimator->IsConfident()); } @@ -107,17 +117,17 @@ TEST(GainController2AdaptiveModeLevelEstimator, IsConfident) { // frames. 
TEST(GainController2AdaptiveModeLevelEstimator, EstimatorIgnoresNonSpeechFrames) { - TestLevelEstimator level_estimator; + TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1); // Simulate speech. - RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence, kVadDataSpeech, + RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence, + level_estimator.level_rms_dbfs, + level_estimator.level_peak_dbfs, kMaxSpeechProbability, *level_estimator.estimator); const float estimated_level_dbfs = level_estimator.estimator->level_dbfs(); // Simulate full-scale non-speech. RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence, - VadLevelAnalyzer::Result{kMinSpeechProbability, - /*rms_dbfs=*/0.0f, - /*peak_dbfs=*/0.0f}, - *level_estimator.estimator); + /*rms_dbfs=*/0.0f, /*peak_dbfs=*/0.0f, + kNoSpeechProbability, *level_estimator.estimator); // No estimated level change is expected. EXPECT_FLOAT_EQ(level_estimator.estimator->level_dbfs(), estimated_level_dbfs); @@ -126,28 +136,29 @@ TEST(GainController2AdaptiveModeLevelEstimator, // Checks the convergence speed of the estimator before it becomes confident. TEST(GainController2AdaptiveModeLevelEstimator, ConvergenceSpeedBeforeConfidence) { - TestLevelEstimator level_estimator; - RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence, kVadDataSpeech, + TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1); + RunOnConstantLevel(/*num_iterations=*/kNumFramesToConfidence, + level_estimator.level_rms_dbfs, + level_estimator.level_peak_dbfs, kMaxSpeechProbability, *level_estimator.estimator); - EXPECT_NEAR(level_estimator.estimator->level_dbfs(), kVadDataSpeech.rms_dbfs, + EXPECT_NEAR(level_estimator.estimator->level_dbfs(), + level_estimator.level_rms_dbfs, kConvergenceSpeedTestsLevelTolerance); } // Checks the convergence speed of the estimator after it becomes confident. TEST(GainController2AdaptiveModeLevelEstimator, ConvergenceSpeedAfterConfidence) { - TestLevelEstimator level_estimator; + TestLevelEstimator level_estimator(/*adjacent_speech_frames_threshold=*/1); // Reach confidence using the initial level estimate. RunOnConstantLevel( /*num_iterations=*/kNumFramesToConfidence, - VadLevelAnalyzer::Result{ - kMaxSpeechProbability, - /*rms_dbfs=*/kInitialSpeechLevelEstimateDbfs, - /*peak_dbfs=*/kInitialSpeechLevelEstimateDbfs + 6.0f}, - *level_estimator.estimator); + /*rms_dbfs=*/level_estimator.initial_speech_level_dbfs, + /*peak_dbfs=*/level_estimator.initial_speech_level_dbfs + 6.0f, + kMaxSpeechProbability, *level_estimator.estimator); // No estimate change should occur, but confidence is achieved. ASSERT_FLOAT_EQ(level_estimator.estimator->level_dbfs(), - kInitialSpeechLevelEstimateDbfs); + level_estimator.initial_speech_level_dbfs); ASSERT_TRUE(level_estimator.estimator->IsConfident()); // After confidence. constexpr float kConvergenceTimeAfterConfidenceNumFrames = 600; // 6 seconds. 
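Reviewer note: the speech level tested above is a leaky, speech-probability-weighted ratio; `AdaptiveModeLevelEstimator::Update()` decays both the numerator and the denominator by `kLevelEstimatorLeakFactor` (defined in agc2_common.h as 1 - 1/400) and then adds `rms_dbfs * speech_probability` and `speech_probability` respectively. A minimal sketch of that recursion follows; the -50 dBFS initial estimate is an assumption for illustration, and the sketch deliberately ignores that the production code only applies the leak once the observation buffer is full.

#include <cstdio>

// Leak factor as defined in agc2_common.h:
// 1 - 1 / kLevelEstimatorTimeToConfidenceMs, with the constant equal to 400.
constexpr float kLeakFactor = 1.0f - 1.0f / 400.0f;

struct Ratio {
  float numerator;
  float denominator;
  float GetRatio() const { return numerator / denominator; }
};

int main() {
  // Assumed initial estimate of -50 dBFS (illustrative value only).
  Ratio level{/*numerator=*/-50.0f, /*denominator=*/1.0f};
  // Feed 100 speech frames at -20 dBFS RMS with full confidence: the weighted
  // average leaks away from the initial value and converges towards -20 dBFS.
  for (int i = 0; i < 100; ++i) {
    const float rms_dbfs = -20.0f;
    const float speech_probability = 1.0f;
    level.numerator =
        level.numerator * kLeakFactor + rms_dbfs * speech_probability;
    level.denominator = level.denominator * kLeakFactor + speech_probability;
  }
  // Prints roughly -20.3 dBFS: close to the observed level, with a small
  // residual contribution from the decayed initial estimate.
  std::printf("estimated speech level: %.2f dBFS\n", level.GetRatio());
  return 0;
}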
@@ -155,8 +166,10 @@ TEST(GainController2AdaptiveModeLevelEstimator, kConvergenceTimeAfterConfidenceNumFrames > kNumFramesToConfidence, ""); RunOnConstantLevel( /*num_iterations=*/kConvergenceTimeAfterConfidenceNumFrames, - kVadDataSpeech, *level_estimator.estimator); - EXPECT_NEAR(level_estimator.estimator->level_dbfs(), kVadDataSpeech.rms_dbfs, + level_estimator.level_rms_dbfs, level_estimator.level_peak_dbfs, + kMaxSpeechProbability, *level_estimator.estimator); + EXPECT_NEAR(level_estimator.estimator->level_dbfs(), + level_estimator.level_rms_dbfs, kConvergenceSpeedTestsLevelTolerance); } @@ -168,30 +181,32 @@ class AdaptiveModeLevelEstimatorParametrization TEST_P(AdaptiveModeLevelEstimatorParametrization, DoNotAdaptToShortSpeechSegments) { - ApmDataDumper apm_data_dumper(0); - AdaptiveModeLevelEstimator level_estimator( - &apm_data_dumper, adjacent_speech_frames_threshold()); - const float initial_level = level_estimator.level_dbfs(); - ASSERT_LT(initial_level, kVadDataSpeech.peak_dbfs); + TestLevelEstimator level_estimator(adjacent_speech_frames_threshold()); + const float initial_level = level_estimator.estimator->level_dbfs(); + ASSERT_LT(initial_level, level_estimator.level_peak_dbfs); for (int i = 0; i < adjacent_speech_frames_threshold() - 1; ++i) { SCOPED_TRACE(i); - level_estimator.Update(kVadDataSpeech); - EXPECT_EQ(initial_level, level_estimator.level_dbfs()); + level_estimator.estimator->Update(level_estimator.level_rms_dbfs, + level_estimator.level_peak_dbfs, + kMaxSpeechProbability); + EXPECT_EQ(initial_level, level_estimator.estimator->level_dbfs()); } - level_estimator.Update(kVadDataNonSpeech); - EXPECT_EQ(initial_level, level_estimator.level_dbfs()); + level_estimator.estimator->Update(level_estimator.level_rms_dbfs, + level_estimator.level_peak_dbfs, + kLowSpeechProbability); + EXPECT_EQ(initial_level, level_estimator.estimator->level_dbfs()); } TEST_P(AdaptiveModeLevelEstimatorParametrization, AdaptToEnoughSpeechSegments) { - ApmDataDumper apm_data_dumper(0); - AdaptiveModeLevelEstimator level_estimator( - &apm_data_dumper, adjacent_speech_frames_threshold()); - const float initial_level = level_estimator.level_dbfs(); - ASSERT_LT(initial_level, kVadDataSpeech.peak_dbfs); + TestLevelEstimator level_estimator(adjacent_speech_frames_threshold()); + const float initial_level = level_estimator.estimator->level_dbfs(); + ASSERT_LT(initial_level, level_estimator.level_peak_dbfs); for (int i = 0; i < adjacent_speech_frames_threshold(); ++i) { - level_estimator.Update(kVadDataSpeech); + level_estimator.estimator->Update(level_estimator.level_rms_dbfs, + level_estimator.level_peak_dbfs, + kMaxSpeechProbability); } - EXPECT_LT(initial_level, level_estimator.level_dbfs()); + EXPECT_LT(initial_level, level_estimator.estimator->level_dbfs()); } INSTANTIATE_TEST_SUITE_P(GainController2, diff --git a/modules/audio_processing/agc2/agc2_common.h b/modules/audio_processing/agc2/agc2_common.h index adb1614926..4af85527b8 100644 --- a/modules/audio_processing/agc2/agc2_common.h +++ b/modules/audio_processing/agc2/agc2_common.h @@ -24,40 +24,26 @@ constexpr int kFrameDurationMs = 10; constexpr int kSubFramesInFrame = 20; constexpr int kMaximalNumberOfSamplesPerChannel = 480; -// Adaptive digital gain applier settings below. -constexpr float kHeadroomDbfs = 1.0f; -constexpr float kMaxGainDb = 30.0f; -constexpr float kInitialAdaptiveDigitalGainDb = 8.0f; +// Adaptive digital gain applier settings. + // At what limiter levels should we start decreasing the adaptive digital gain. 
-constexpr float kLimiterThresholdForAgcGainDbfs = -kHeadroomDbfs; +constexpr float kLimiterThresholdForAgcGainDbfs = -1.0f; // This is the threshold for speech. Speech frames are used for updating the // speech level, measuring the amount of speech, and deciding when to allow target -gain reduction. +gain changes. constexpr float kVadConfidenceThreshold = 0.95f; -// Adaptive digital level estimator parameters. // Number of milliseconds of speech frames to observe to make the estimator // confident. constexpr float kLevelEstimatorTimeToConfidenceMs = 400; constexpr float kLevelEstimatorLeakFactor = 1.0f - 1.0f / kLevelEstimatorTimeToConfidenceMs; -// Robust VAD probability and speech decisions. -constexpr int kDefaultLevelEstimatorAdjacentSpeechFramesThreshold = 12; - // Saturation Protector settings. constexpr float kSaturationProtectorInitialHeadroomDb = 20.0f; -constexpr float kSaturationProtectorExtraHeadroomDb = 5.0f; constexpr int kSaturationProtectorBufferSize = 4; -// Set the initial speech level estimate so that `kInitialAdaptiveDigitalGainDb` -// is applied at the beginning of the call. -constexpr float kInitialSpeechLevelEstimateDbfs = - -kSaturationProtectorExtraHeadroomDb - - kSaturationProtectorInitialHeadroomDb - kInitialAdaptiveDigitalGainDb - - kHeadroomDbfs; - // Number of interpolation points for each region of the limiter. // These values have been tuned to limit the interpolated gain curve error given // the limiter parameters and allowing a maximum error of +/- 32768^-1. diff --git a/modules/audio_processing/agc2/agc2_testing_common.h b/modules/audio_processing/agc2/agc2_testing_common.h index 4572d9cffd..afed97e83b 100644 --- a/modules/audio_processing/agc2/agc2_testing_common.h +++ b/modules/audio_processing/agc2/agc2_testing_common.h @@ -25,7 +25,7 @@ constexpr float kMaxS16 = static_cast<float>(std::numeric_limits<int16_t>::max()); // Level Estimator test parameters. -constexpr float kDecayMs = 500.f; +constexpr float kDecayMs = 20.0f; // Limiter parameters. constexpr float kLimiterMaxInputLevelDbFs = 1.f; diff --git a/modules/audio_processing/agc2/biquad_filter.cc b/modules/audio_processing/agc2/biquad_filter.cc index da8557c190..453125fde7 100644 --- a/modules/audio_processing/agc2/biquad_filter.cc +++ b/modules/audio_processing/agc2/biquad_filter.cc @@ -10,26 +10,37 @@ #include "modules/audio_processing/agc2/biquad_filter.h" -#include +#include "rtc_base/arraysize.h" namespace webrtc { -// Transposed direct form I implementation of a bi-quad filter applied to an -// input signal |x| to produce an output signal |y|. +BiQuadFilter::BiQuadFilter(const Config& config) + : config_(config), state_({}) {} + +BiQuadFilter::~BiQuadFilter() = default; + +void BiQuadFilter::SetConfig(const Config& config) { + config_ = config; + state_ = {}; +} + +void BiQuadFilter::Reset() { + state_ = {}; +} + void BiQuadFilter::Process(rtc::ArrayView<const float> x, rtc::ArrayView<float> y) { + RTC_DCHECK_EQ(x.size(), y.size()); for (size_t k = 0; k < x.size(); ++k) { - // Use temporary variable for x[k] to allow in-place function call - // (that x and y refer to the same array). + // Use a temporary variable for `x[k]` to allow in-place processing.
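// The loop below implements the bi-quad recurrence
//   y[k] = b0 x[k] + b1 x[k-1] + b2 x[k-2] - a1 y[k-1] - a2 y[k-2],
// where `config_.b` stores {b0, b1, b2}, `config_.a` stores {a1, a2},
// `state_.b` holds the last two inputs and `state_.a` the last two outputs.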
const float tmp = x[k]; - y[k] = coefficients_.b[0] * tmp + coefficients_.b[1] * biquad_state_.b[0] + - coefficients_.b[2] * biquad_state_.b[1] - - coefficients_.a[0] * biquad_state_.a[0] - - coefficients_.a[1] * biquad_state_.a[1]; - biquad_state_.b[1] = biquad_state_.b[0]; - biquad_state_.b[0] = tmp; - biquad_state_.a[1] = biquad_state_.a[0]; - biquad_state_.a[0] = y[k]; + y[k] = config_.b[0] * tmp + config_.b[1] * state_.b[0] + + config_.b[2] * state_.b[1] - config_.a[0] * state_.a[0] - + config_.a[1] * state_.a[1]; + state_.b[1] = state_.b[0]; + state_.b[0] = tmp; + state_.a[1] = state_.a[0]; + state_.a[0] = y[k]; } } diff --git a/modules/audio_processing/agc2/biquad_filter.h b/modules/audio_processing/agc2/biquad_filter.h index 7bf3301e4b..5273ff9386 100644 --- a/modules/audio_processing/agc2/biquad_filter.h +++ b/modules/audio_processing/agc2/biquad_filter.h @@ -11,54 +11,44 @@ #ifndef MODULES_AUDIO_PROCESSING_AGC2_BIQUAD_FILTER_H_ #define MODULES_AUDIO_PROCESSING_AGC2_BIQUAD_FILTER_H_ -#include <algorithm> - #include "api/array_view.h" -#include "rtc_base/arraysize.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { +// Transposed direct form I implementation of a bi-quad filter. +// b[0] + b[1] • z^(-1) + b[2] • z^(-2) +// H(z) = ------------------------------------ +// 1 + a[1] • z^(-1) + a[2] • z^(-2) class BiQuadFilter { public: // Normalized filter coefficients. - // b_0 + b_1 • z^(-1) + b_2 • z^(-2) - // H(z) = --------------------------------- - // 1 + a_1 • z^(-1) + a_2 • z^(-2) - struct BiQuadCoefficients { - float b[3]; - float a[2]; + // Computed as `[b, a] = scipy.signal.butter(N=2, Wn, btype)`. + struct Config { float b[3]; // b[0], b[1], b[2]. float a[2]; // a[1], a[2]. }; - BiQuadFilter() = default; + explicit BiQuadFilter(const Config& config); + BiQuadFilter(const BiQuadFilter&) = delete; + BiQuadFilter& operator=(const BiQuadFilter&) = delete; + ~BiQuadFilter(); - void Initialize(const BiQuadCoefficients& coefficients) { - coefficients_ = coefficients; - } + // Sets the filter configuration and resets the internal state. + void SetConfig(const Config& config); - void Reset() { biquad_state_.Reset(); } + // Zeroes the filter state. + void Reset(); - // Produces a filtered output y of the input x. Both x and y need to - // have the same length. In-place modification is allowed. + // Filters `x` and writes the output in `y`, which must have the same length + // as `x`. In-place processing is supported.
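// Usage sketch (coefficients taken from the `kBiQuadConfig` test constant in
// the unit tests below; note that the leading a[0] = 1 coefficient returned
// by scipy.signal.butter is implied and not stored in `Config`):
//   BiQuadFilter filter({{0.99446179f, -1.98892358f, 0.99446179f},
//                        {-1.98889291f, 0.98895425f}});
//   filter.Process(samples, samples);  // In-place high-pass filtering.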
void Process(rtc::ArrayView<const float> x, rtc::ArrayView<float> y); private: - struct BiQuadState { - BiQuadState() { Reset(); } - - void Reset() { - std::fill(b, b + arraysize(b), 0.f); - std::fill(a, a + arraysize(a), 0.f); - } - + Config config_; + struct State { float b[2]; float a[2]; - }; - - BiQuadState biquad_state_; - BiQuadCoefficients coefficients_; - - RTC_DISALLOW_COPY_AND_ASSIGN(BiQuadFilter); + } state_; }; } // namespace webrtc diff --git a/modules/audio_processing/agc2/biquad_filter_unittest.cc b/modules/audio_processing/agc2/biquad_filter_unittest.cc index cd9a272787..a53036b08e 100644 --- a/modules/audio_processing/agc2/biquad_filter_unittest.cc +++ b/modules/audio_processing/agc2/biquad_filter_unittest.cc @@ -19,11 +19,10 @@ #include "rtc_base/gunit.h" namespace webrtc { -namespace test { namespace { -constexpr size_t kFrameSize = 8; -constexpr size_t kNumFrames = 4; +constexpr int kFrameSize = 8; +constexpr int kNumFrames = 4; using FloatArraySequence = std::array<std::array<float, kFrameSize>, kNumFrames>; @@ -37,8 +36,8 @@ constexpr FloatArraySequence kBiQuadInputSeq = { {{22.645832f, -64.597153f, 55.462521f, -109.393188f, 10.117825f, -40.019642f, -98.612228f, -8.330326f}}}}; -// Generated via "B, A = scipy.signal.butter(2, 30/12000, btype='highpass')" -const BiQuadFilter::BiQuadCoefficients kBiQuadConfig = { +// Computed as `scipy.signal.butter(N=2, Wn=60/24000, btype='highpass')`. +constexpr BiQuadFilter::Config kBiQuadConfig{ {0.99446179f, -1.98892358f, 0.99446179f}, {-1.98889291f, 0.98895425f}}; @@ -57,22 +56,23 @@ constexpr FloatArraySequence kBiQuadOutputSeq = { {{24.84286614f, -62.18094158f, 57.91488056f, -106.65685933f, 13.38760103f, -36.60367134f, -94.44880104f, -3.59920354f}}}}; -// Fail for every pair from two equally sized rtc::ArrayView<float> views such +// Fails for every pair from two equally sized rtc::ArrayView<float> views such // that their relative error is above a given threshold. If the expected value -// of a pair is 0, the tolerance is used to check the absolute error. +// of a pair is 0, `tolerance` is used to check the absolute error. void ExpectNearRelative(rtc::ArrayView<const float> expected, rtc::ArrayView<const float> computed, const float tolerance) { // The relative error is undefined when the expected value is 0. - // When that happens, check the absolute error instead. |safe_den| is used + // When that happens, check the absolute error instead. `safe_den` is used // below to implement such logic. - auto safe_den = [](float x) { return (x == 0.f) ? 1.f : std::fabs(x); }; + auto safe_den = [](float x) { return (x == 0.0f) ? 1.0f : std::fabs(x); }; ASSERT_EQ(expected.size(), computed.size()); for (size_t i = 0; i < expected.size(); ++i) { const float abs_diff = std::fabs(expected[i] - computed[i]); // No failure when the values are equal. - if (abs_diff == 0.f) + if (abs_diff == 0.0f) { continue; + } SCOPED_TRACE(i); SCOPED_TRACE(expected[i]); SCOPED_TRACE(computed[i]); @@ -80,32 +80,32 @@ void ExpectNearRelative(rtc::ArrayView<const float> expected, } } -} // namespace - +// Checks that filtering works when different containers are used both as input +// and as output. TEST(BiQuadFilterTest, FilterNotInPlace) { - BiQuadFilter filter; - filter.Initialize(kBiQuadConfig); + BiQuadFilter filter(kBiQuadConfig); std::array<float, kFrameSize> samples; // TODO(https://bugs.webrtc.org/8948): Add when the issue is fixed.
// FloatingPointExceptionObserver fpe_observer; - for (size_t i = 0; i < kNumFrames; ++i) { + for (int i = 0; i < kNumFrames; ++i) { SCOPED_TRACE(i); filter.Process(kBiQuadInputSeq[i], samples); ExpectNearRelative(kBiQuadOutputSeq[i], samples, 2e-4f); } } +// Checks that filtering works when the same container is used both as input and +// as output. TEST(BiQuadFilterTest, FilterInPlace) { - BiQuadFilter filter; - filter.Initialize(kBiQuadConfig); + BiQuadFilter filter(kBiQuadConfig); std::array<float, kFrameSize> samples; // TODO(https://bugs.webrtc.org/8948): Add when the issue is fixed. // FloatingPointExceptionObserver fpe_observer; - for (size_t i = 0; i < kNumFrames; ++i) { + for (int i = 0; i < kNumFrames; ++i) { SCOPED_TRACE(i); std::copy(kBiQuadInputSeq[i].begin(), kBiQuadInputSeq[i].end(), samples.begin()); @@ -114,23 +114,62 @@ TEST(BiQuadFilterTest, FilterInPlace) { } } +// Checks that different configurations produce different outputs. +TEST(BiQuadFilterTest, SetConfigDifferentOutput) { + BiQuadFilter filter(/*config=*/{{0.97803048f, -1.95606096f, 0.97803048f}, + {-1.95557824f, 0.95654368f}}); + + std::array<float, kFrameSize> samples1; + for (int i = 0; i < kNumFrames; ++i) { + filter.Process(kBiQuadInputSeq[i], samples1); + } + + filter.SetConfig( + {{0.09763107f, 0.19526215f, 0.09763107f}, {-0.94280904f, 0.33333333f}}); + std::array<float, kFrameSize> samples2; + for (int i = 0; i < kNumFrames; ++i) { + filter.Process(kBiQuadInputSeq[i], samples2); + } + + EXPECT_NE(samples1, samples2); +} + +// Checks that `SetConfig()` resets the filter state even when the new filter +// coefficients equal the old ones. +TEST(BiQuadFilterTest, SetConfigResetsState) { + BiQuadFilter filter(kBiQuadConfig); + + std::array<float, kFrameSize> samples1; + for (int i = 0; i < kNumFrames; ++i) { + filter.Process(kBiQuadInputSeq[i], samples1); + } + + filter.SetConfig(kBiQuadConfig); + std::array<float, kFrameSize> samples2; + for (int i = 0; i < kNumFrames; ++i) { + filter.Process(kBiQuadInputSeq[i], samples2); + } + + EXPECT_EQ(samples1, samples2); +} + +// Checks that when `Reset()` is called the filter state is reset. TEST(BiQuadFilterTest, Reset) { - BiQuadFilter filter; - filter.Initialize(kBiQuadConfig); + BiQuadFilter filter(kBiQuadConfig); std::array<float, kFrameSize> samples1; - for (size_t i = 0; i < kNumFrames; ++i) { + for (int i = 0; i < kNumFrames; ++i) { filter.Process(kBiQuadInputSeq[i], samples1); } filter.Reset(); std::array<float, kFrameSize> samples2; - for (size_t i = 0; i < kNumFrames; ++i) { + for (int i = 0; i < kNumFrames; ++i) { filter.Process(kBiQuadInputSeq[i], samples2); } EXPECT_EQ(samples1, samples2); } -} // namespace test +} // namespace } // namespace webrtc diff --git a/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc b/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc index bc92613b69..221b499e32 100644 --- a/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc +++ b/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc @@ -105,7 +105,7 @@ std::vector<double> SampleLimiterRegion(const LimiterDbGainCurve* limiter) { const auto interval = q.top(); q.pop(); - // Split |interval| and enqueue. + // Split `interval` and enqueue.
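// The sub-interval with the largest under-approximation error sits at the
// top of the priority queue; it is bisected here and the halves re-enqueued,
// so the sampling points concentrate where the piece-wise linear fit of the
// limiter gain curve is worst.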
double x_split = (interval.x0 + interval.x1) / 2.0; q.emplace(interval.x0, x_split, LimiterUnderApproximationNegativeError(limiter, interval.x0, @@ -135,7 +135,7 @@ std::vector SampleLimiterRegion(const LimiterDbGainCurve* limiter) { void PrecomputeKneeApproxParams(const LimiterDbGainCurve* limiter, test::InterpolatedParameters* parameters) { static_assert(kInterpolatedGainCurveKneePoints > 2, ""); - // Get |kInterpolatedGainCurveKneePoints| - 1 equally spaced points. + // Get `kInterpolatedGainCurveKneePoints` - 1 equally spaced points. const std::vector points = test::LinSpace( limiter->knee_start_linear(), limiter->limiter_start_linear(), kInterpolatedGainCurveKneePoints - 1); diff --git a/modules/audio_processing/agc2/compute_interpolated_gain_curve.h b/modules/audio_processing/agc2/compute_interpolated_gain_curve.h index 5f52441ec3..08b676f5fd 100644 --- a/modules/audio_processing/agc2/compute_interpolated_gain_curve.h +++ b/modules/audio_processing/agc2/compute_interpolated_gain_curve.h @@ -29,8 +29,8 @@ namespace test { // Knee and beyond-knee regions approximation parameters. // The gain curve is approximated as a piece-wise linear function. -// |approx_params_x_| are the boundaries between adjacent linear pieces, -// |approx_params_m_| and |approx_params_q_| are the slope and the y-intercept +// `approx_params_x_` are the boundaries between adjacent linear pieces, +// `approx_params_m_` and `approx_params_q_` are the slope and the y-intercept // values of each piece. struct InterpolatedParameters { std::array diff --git a/modules/audio_processing/agc2/down_sampler.cc b/modules/audio_processing/agc2/down_sampler.cc deleted file mode 100644 index fd1a2c3a46..0000000000 --- a/modules/audio_processing/agc2/down_sampler.cc +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "modules/audio_processing/agc2/down_sampler.h" - -#include - -#include - -#include "modules/audio_processing/agc2/biquad_filter.h" -#include "modules/audio_processing/logging/apm_data_dumper.h" -#include "rtc_base/checks.h" - -namespace webrtc { -namespace { - -constexpr int kChunkSizeMs = 10; -constexpr int kSampleRate8kHz = 8000; -constexpr int kSampleRate16kHz = 16000; -constexpr int kSampleRate32kHz = 32000; -constexpr int kSampleRate48kHz = 48000; - -// Bandlimiter coefficients computed based on that only -// the first 40 bins of the spectrum for the downsampled -// signal are used. 
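// (For reference: 41/64 * 4000 = 2562.5 Hz, i.e. the cutoff sits just above
// the first 40 of 64 spectral bins mentioned above, and each butter() call
// below normalizes that cutoff by the Nyquist frequency of the corresponding
// input rate.)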
-// [B,A] = butter(2,(41/64*4000)/8000) -const BiQuadFilter::BiQuadCoefficients kLowPassFilterCoefficients_16kHz = { - {0.1455f, 0.2911f, 0.1455f}, - {-0.6698f, 0.2520f}}; - -// [B,A] = butter(2,(41/64*4000)/16000) -const BiQuadFilter::BiQuadCoefficients kLowPassFilterCoefficients_32kHz = { - {0.0462f, 0.0924f, 0.0462f}, - {-1.3066f, 0.4915f}}; - -// [B,A] = butter(2,(41/64*4000)/24000) -const BiQuadFilter::BiQuadCoefficients kLowPassFilterCoefficients_48kHz = { - {0.0226f, 0.0452f, 0.0226f}, - {-1.5320f, 0.6224f}}; - -} // namespace - -DownSampler::DownSampler(ApmDataDumper* data_dumper) - : data_dumper_(data_dumper) { - Initialize(48000); -} -void DownSampler::Initialize(int sample_rate_hz) { - RTC_DCHECK( - sample_rate_hz == kSampleRate8kHz || sample_rate_hz == kSampleRate16kHz || - sample_rate_hz == kSampleRate32kHz || sample_rate_hz == kSampleRate48kHz); - - sample_rate_hz_ = sample_rate_hz; - down_sampling_factor_ = rtc::CheckedDivExact(sample_rate_hz_, 8000); - - /// Note that the down sampling filter is not used if the sample rate is 8 - /// kHz. - if (sample_rate_hz_ == kSampleRate16kHz) { - low_pass_filter_.Initialize(kLowPassFilterCoefficients_16kHz); - } else if (sample_rate_hz_ == kSampleRate32kHz) { - low_pass_filter_.Initialize(kLowPassFilterCoefficients_32kHz); - } else if (sample_rate_hz_ == kSampleRate48kHz) { - low_pass_filter_.Initialize(kLowPassFilterCoefficients_48kHz); - } -} - -void DownSampler::DownSample(rtc::ArrayView in, - rtc::ArrayView out) { - data_dumper_->DumpWav("agc2_down_sampler_input", in, sample_rate_hz_, 1); - RTC_DCHECK_EQ(sample_rate_hz_ * kChunkSizeMs / 1000, in.size()); - RTC_DCHECK_EQ(kSampleRate8kHz * kChunkSizeMs / 1000, out.size()); - const size_t kMaxNumFrames = kSampleRate48kHz * kChunkSizeMs / 1000; - float x[kMaxNumFrames]; - - // Band-limit the signal to 4 kHz. - if (sample_rate_hz_ != kSampleRate8kHz) { - low_pass_filter_.Process(in, rtc::ArrayView(x, in.size())); - - // Downsample the signal. - size_t k = 0; - for (size_t j = 0; j < out.size(); ++j) { - RTC_DCHECK_GT(kMaxNumFrames, k); - out[j] = x[k]; - k += down_sampling_factor_; - } - } else { - std::copy(in.data(), in.data() + in.size(), out.data()); - } - - data_dumper_->DumpWav("agc2_down_sampler_output", out, kSampleRate8kHz, 1); -} - -} // namespace webrtc diff --git a/modules/audio_processing/agc2/down_sampler.h b/modules/audio_processing/agc2/down_sampler.h deleted file mode 100644 index a44f96fa2d..0000000000 --- a/modules/audio_processing/agc2/down_sampler.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef MODULES_AUDIO_PROCESSING_AGC2_DOWN_SAMPLER_H_ -#define MODULES_AUDIO_PROCESSING_AGC2_DOWN_SAMPLER_H_ - -#include "api/array_view.h" -#include "modules/audio_processing/agc2/biquad_filter.h" - -namespace webrtc { - -class ApmDataDumper; - -class DownSampler { - public: - explicit DownSampler(ApmDataDumper* data_dumper); - - DownSampler() = delete; - DownSampler(const DownSampler&) = delete; - DownSampler& operator=(const DownSampler&) = delete; - - void Initialize(int sample_rate_hz); - - void DownSample(rtc::ArrayView in, rtc::ArrayView out); - - private: - ApmDataDumper* const data_dumper_; - int sample_rate_hz_; - int down_sampling_factor_; - BiQuadFilter low_pass_filter_; -}; - -} // namespace webrtc - -#endif // MODULES_AUDIO_PROCESSING_AGC2_DOWN_SAMPLER_H_ diff --git a/modules/audio_processing/agc2/fixed_digital_level_estimator.cc b/modules/audio_processing/agc2/fixed_digital_level_estimator.cc index 3e9bb2efbd..1995b24913 100644 --- a/modules/audio_processing/agc2/fixed_digital_level_estimator.cc +++ b/modules/audio_processing/agc2/fixed_digital_level_estimator.cc @@ -20,15 +20,16 @@ namespace webrtc { namespace { -constexpr float kInitialFilterStateLevel = 0.f; +constexpr float kInitialFilterStateLevel = 0.0f; // Instant attack. -constexpr float kAttackFilterConstant = 0.f; -// This is computed from kDecayMs by -// 10 ** (-1/20 * subframe_duration / kDecayMs). -// |subframe_duration| is |kFrameDurationMs / kSubFramesInFrame|. -// kDecayMs is defined in agc2_testing_common.h -constexpr float kDecayFilterConstant = 0.9998848773724686f; +constexpr float kAttackFilterConstant = 0.0f; + +// Limiter decay constant. +// Computed as `10 ** (-1/20 * subframe_duration / kDecayMs)` where: +// - `subframe_duration` is `kFrameDurationMs / kSubFramesInFrame`; +// - `kDecayMs` is defined in agc2_testing_common.h. +constexpr float kDecayFilterConstant = 0.9971259f; } // namespace @@ -57,7 +58,7 @@ std::array FixedDigitalLevelEstimator::ComputeLevel( // Compute max envelope without smoothing. std::array envelope{}; - for (size_t channel_idx = 0; channel_idx < float_frame.num_channels(); + for (int channel_idx = 0; channel_idx < float_frame.num_channels(); ++channel_idx) { const auto channel = float_frame.channel(channel_idx); for (int sub_frame = 0; sub_frame < kSubFramesInFrame; ++sub_frame) { diff --git a/modules/audio_processing/agc2/fixed_digital_level_estimator.h b/modules/audio_processing/agc2/fixed_digital_level_estimator.h index d96aedaf9e..d26b55950c 100644 --- a/modules/audio_processing/agc2/fixed_digital_level_estimator.h +++ b/modules/audio_processing/agc2/fixed_digital_level_estimator.h @@ -16,7 +16,6 @@ #include "modules/audio_processing/agc2/agc2_common.h" #include "modules/audio_processing/include/audio_frame_view.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -34,6 +33,10 @@ class FixedDigitalLevelEstimator { FixedDigitalLevelEstimator(int sample_rate_hz, ApmDataDumper* apm_data_dumper); + FixedDigitalLevelEstimator(const FixedDigitalLevelEstimator&) = delete; + FixedDigitalLevelEstimator& operator=(const FixedDigitalLevelEstimator&) = + delete; + // The input is assumed to be in FloatS16 format. Scaled input will // produce similarly scaled output. A frame of with kFrameDurationMs // ms of audio produces a level estimates in the same scale. 
The @@ -57,8 +60,6 @@ class FixedDigitalLevelEstimator { float filter_state_level_; int samples_in_frame_; int samples_in_sub_frame_; - - RTC_DISALLOW_COPY_AND_ASSIGN(FixedDigitalLevelEstimator); }; } // namespace webrtc diff --git a/modules/audio_processing/agc2/gain_applier.cc b/modules/audio_processing/agc2/gain_applier.cc index 8c437177e3..f9e276d3a8 100644 --- a/modules/audio_processing/agc2/gain_applier.cc +++ b/modules/audio_processing/agc2/gain_applier.cc @@ -25,7 +25,7 @@ bool GainCloseToOne(float gain_factor) { } void ClipSignal(AudioFrameView<float> signal) { - for (size_t k = 0; k < signal.num_channels(); ++k) { + for (int k = 0; k < signal.num_channels(); ++k) { rtc::ArrayView<float> channel_view = signal.channel(k); for (auto& sample : channel_view) { sample = rtc::SafeClamp(sample, kMinFloatS16Value, kMaxFloatS16Value); @@ -45,7 +45,7 @@ void ApplyGainWithRamping(float last_gain_linear, // Gain is constant and different from 1. if (last_gain_linear == gain_at_end_of_frame_linear) { - for (size_t k = 0; k < float_frame.num_channels(); ++k) { + for (int k = 0; k < float_frame.num_channels(); ++k) { rtc::ArrayView<float> channel_view = float_frame.channel(k); for (auto& sample : channel_view) { sample *= gain_at_end_of_frame_linear; @@ -58,8 +58,8 @@ const float increment = (gain_at_end_of_frame_linear - last_gain_linear) * inverse_samples_per_channel; float gain = last_gain_linear; - for (size_t i = 0; i < float_frame.samples_per_channel(); ++i) { - for (size_t ch = 0; ch < float_frame.num_channels(); ++ch) { + for (int i = 0; i < float_frame.samples_per_channel(); ++i) { + for (int ch = 0; ch < float_frame.num_channels(); ++ch) { float_frame.channel(ch)[i] *= gain; } gain += increment; @@ -88,12 +88,13 @@ void GainApplier::ApplyGain(AudioFrameView<float> signal) { } } +// TODO(bugs.webrtc.org/7494): Remove once switched to gains in dB. void GainApplier::SetGainFactor(float gain_factor) { RTC_DCHECK_GT(gain_factor, 0.f); current_gain_factor_ = gain_factor; } -void GainApplier::Initialize(size_t samples_per_channel) { +void GainApplier::Initialize(int samples_per_channel) { RTC_DCHECK_GT(samples_per_channel, 0); samples_per_channel_ = static_cast<int>(samples_per_channel); inverse_samples_per_channel_ = 1.f / samples_per_channel_; diff --git a/modules/audio_processing/agc2/gain_applier.h b/modules/audio_processing/agc2/gain_applier.h index d9aa19d1aa..ba8a4a4cd2 100644 --- a/modules/audio_processing/agc2/gain_applier.h +++ b/modules/audio_processing/agc2/gain_applier.h @@ -25,7 +25,7 @@ class GainApplier { float GetGainFactor() const { return current_gain_factor_; } private: - void Initialize(size_t samples_per_channel); + void Initialize(int samples_per_channel); // Whether to clip samples after gain is applied. If 'true', result // will fit in FloatS16 range. diff --git a/modules/audio_processing/agc2/interpolated_gain_curve.cc b/modules/audio_processing/agc2/interpolated_gain_curve.cc index 3dd501096a..b522ec372c 100644 --- a/modules/audio_processing/agc2/interpolated_gain_curve.cc +++ b/modules/audio_processing/agc2/interpolated_gain_curve.cc @@ -115,7 +115,7 @@ void InterpolatedGainCurve::RegionLogger::LogRegionStats( break; } default: { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } } } @@ -151,11 +151,11 @@ void InterpolatedGainCurve::UpdateStats(float input_level) const { } // Looks up a gain to apply given a non-negative input level.
-// The cost of this operation depends on the region in which |input_level| +// The cost of this operation depends on the region in which `input_level` // falls. // For the identity and the saturation regions the cost is O(1). // For the other regions, namely knee and limiter, the cost is -// O(2 + log2(|kInterpolatedGainCurveTotalPoints|), plus O(1) for the +// O(2 + log2(`kInterpolatedGainCurveTotalPoints`), plus O(1) for the // linear interpolation (one product and one sum). float InterpolatedGainCurve::LookUpGainToApply(float input_level) const { UpdateStats(input_level); diff --git a/modules/audio_processing/agc2/interpolated_gain_curve.h b/modules/audio_processing/agc2/interpolated_gain_curve.h index af993204ce..b1a5cf473b 100644 --- a/modules/audio_processing/agc2/interpolated_gain_curve.h +++ b/modules/audio_processing/agc2/interpolated_gain_curve.h @@ -15,7 +15,6 @@ #include #include "modules/audio_processing/agc2/agc2_common.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/gtest_prod_util.h" #include "system_wrappers/include/metrics.h" @@ -64,6 +63,9 @@ class InterpolatedGainCurve { const std::string& histogram_name_prefix); ~InterpolatedGainCurve(); + InterpolatedGainCurve(const InterpolatedGainCurve&) = delete; + InterpolatedGainCurve& operator=(const InterpolatedGainCurve&) = delete; + Stats get_stats() const { return stats_; } // Given a non-negative input level (linear scale), a scalar factor to apply @@ -143,8 +145,6 @@ // Stats. mutable Stats stats_; - - RTC_DISALLOW_COPY_AND_ASSIGN(InterpolatedGainCurve); }; } // namespace webrtc diff --git a/modules/audio_processing/agc2/limiter.cc b/modules/audio_processing/agc2/limiter.cc index ed7d3ee5f2..57580924dc 100644 --- a/modules/audio_processing/agc2/limiter.cc +++ b/modules/audio_processing/agc2/limiter.cc @@ -18,6 +18,7 @@ #include "modules/audio_processing/agc2/agc2_common.h" #include "modules/audio_processing/logging/apm_data_dumper.h" #include "rtc_base/checks.h" +#include "rtc_base/numerics/safe_conversions.h" #include "rtc_base/numerics/safe_minmax.h" namespace webrtc { @@ -29,14 +30,14 @@ namespace { // sub-frame, linear interpolation is replaced with a power function which // reduces the chances of over-shooting (and hence saturation), while reducing // the fixed gain effectiveness. -constexpr float kAttackFirstSubframeInterpolationPower = 8.f; +constexpr float kAttackFirstSubframeInterpolationPower = 8.0f; void InterpolateFirstSubframe(float last_factor, float current_factor, rtc::ArrayView<float> subframe) { - const auto n = subframe.size(); - constexpr auto p = kAttackFirstSubframeInterpolationPower; - for (size_t i = 0; i < n; ++i) { + const int n = rtc::dchecked_cast<int>(subframe.size()); + constexpr float p = kAttackFirstSubframeInterpolationPower; + for (int i = 0; i < n; ++i) { subframe[i] = std::pow(1.f - i / n, p) * (last_factor - current_factor) + current_factor; } @@ -44,10 +45,10 @@ void ComputePerSampleSubframeFactors( const std::array<float, kSubFramesInFrame + 1>& scaling_factors, - size_t samples_per_channel, + int samples_per_channel, rtc::ArrayView<float> per_sample_scaling_factors) { - const size_t num_subframes = scaling_factors.size() - 1; - const size_t subframe_size = + const int num_subframes = scaling_factors.size() - 1; + const int subframe_size = rtc::CheckedDivExact(samples_per_channel, num_subframes); // Handle first sub-frame differently in case of attack.
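// Example: with kSubFramesInFrame = 20 and 480 samples per channel (10 ms at
// 48 kHz), the 21 scaling factors delimit 20 sub-frames of 24 samples each,
// and the per-sample gain ramps linearly between the factors at the sub-frame
// boundaries.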
@@ -59,12 +60,12 @@ void ComputePerSampleSubframeFactors( per_sample_scaling_factors.subview(0, subframe_size))); } - for (size_t i = is_attack ? 1 : 0; i < num_subframes; ++i) { - const size_t subframe_start = i * subframe_size; + for (int i = is_attack ? 1 : 0; i < num_subframes; ++i) { + const int subframe_start = i * subframe_size; const float scaling_start = scaling_factors[i]; const float scaling_end = scaling_factors[i + 1]; const float scaling_diff = (scaling_end - scaling_start) / subframe_size; - for (size_t j = 0; j < subframe_size; ++j) { + for (int j = 0; j < subframe_size; ++j) { per_sample_scaling_factors[subframe_start + j] = scaling_start + scaling_diff * j; } @@ -73,18 +74,18 @@ void ScaleSamples(rtc::ArrayView<const float> per_sample_scaling_factors, AudioFrameView<float> signal) { - const size_t samples_per_channel = signal.samples_per_channel(); + const int samples_per_channel = signal.samples_per_channel(); RTC_DCHECK_EQ(samples_per_channel, per_sample_scaling_factors.size()); - for (size_t i = 0; i < signal.num_channels(); ++i) { - auto channel = signal.channel(i); - for (size_t j = 0; j < samples_per_channel; ++j) { + for (int i = 0; i < signal.num_channels(); ++i) { + rtc::ArrayView<float> channel = signal.channel(i); + for (int j = 0; j < samples_per_channel; ++j) { channel[j] = rtc::SafeClamp(channel[j] * per_sample_scaling_factors[j], kMinFloatS16Value, kMaxFloatS16Value); } } } -void CheckLimiterSampleRate(size_t sample_rate_hz) { +void CheckLimiterSampleRate(int sample_rate_hz) { // Check that per_sample_scaling_factors_ is large enough. RTC_DCHECK_LE(sample_rate_hz, kMaximalNumberOfSamplesPerChannel * 1000 / kFrameDurationMs); @@ -92,7 +93,7 @@ } // namespace -Limiter::Limiter(size_t sample_rate_hz, +Limiter::Limiter(int sample_rate_hz, ApmDataDumper* apm_data_dumper, const std::string& histogram_name) : interp_gain_curve_(apm_data_dumper, histogram_name), @@ -104,7 +105,8 @@ Limiter::~Limiter() = default; void Limiter::Process(AudioFrameView<float> signal) { - const auto level_estimate = level_estimator_.ComputeLevel(signal); + const std::array<float, kSubFramesInFrame> level_estimate = + level_estimator_.ComputeLevel(signal); RTC_DCHECK_EQ(level_estimate.size() + 1, scaling_factors_.size()); scaling_factors_[0] = last_scaling_factor_; @@ -113,7 +115,7 @@ return interp_gain_curve_.LookUpGainToApply(x); }); - const size_t samples_per_channel = signal.samples_per_channel(); + const int samples_per_channel = signal.samples_per_channel(); RTC_DCHECK_LE(samples_per_channel, kMaximalNumberOfSamplesPerChannel); auto per_sample_scaling_factors = rtc::ArrayView<float>( @@ -136,7 +138,7 @@ InterpolatedGainCurve::Stats Limiter::GetGainCurveStats() const { return interp_gain_curve_.get_stats(); } -void Limiter::SetSampleRate(size_t sample_rate_hz) { +void Limiter::SetSampleRate(int sample_rate_hz) { CheckLimiterSampleRate(sample_rate_hz); level_estimator_.SetSampleRate(sample_rate_hz); } diff --git a/modules/audio_processing/agc2/limiter.h b/modules/audio_processing/agc2/limiter.h index df7b540b70..669e202c20 100644 --- a/modules/audio_processing/agc2/limiter.h +++ b/modules/audio_processing/agc2/limiter.h @@ -17,21 +17,20 @@ #include "modules/audio_processing/agc2/fixed_digital_level_estimator.h" #include "modules/audio_processing/agc2/interpolated_gain_curve.h" #include "modules/audio_processing/include/audio_frame_view.h" -#include
"rtc_base/constructor_magic.h" namespace webrtc { class ApmDataDumper; class Limiter { public: - Limiter(size_t sample_rate_hz, + Limiter(int sample_rate_hz, ApmDataDumper* apm_data_dumper, const std::string& histogram_name_prefix); Limiter(const Limiter& limiter) = delete; Limiter& operator=(const Limiter& limiter) = delete; ~Limiter(); - // Applies limiter and hard-clipping to |signal|. + // Applies limiter and hard-clipping to `signal`. void Process(AudioFrameView signal); InterpolatedGainCurve::Stats GetGainCurveStats() const; @@ -40,7 +39,7 @@ class Limiter { // * below kMaximalNumberOfSamplesPerChannel*1000/kFrameDurationMs // so that samples_per_channel fit in the // per_sample_scaling_factors_ array. - void SetSampleRate(size_t sample_rate_hz); + void SetSampleRate(int sample_rate_hz); // Resets the internal state. void Reset(); diff --git a/modules/audio_processing/agc2/limiter_db_gain_curve.cc b/modules/audio_processing/agc2/limiter_db_gain_curve.cc index d55ed5df58..d47c0b2e17 100644 --- a/modules/audio_processing/agc2/limiter_db_gain_curve.cc +++ b/modules/audio_processing/agc2/limiter_db_gain_curve.cc @@ -105,7 +105,7 @@ double LimiterDbGainCurve::GetGainLinear(double input_level_linear) const { input_level_linear; } -// Computes the first derivative of GetGainLinear() in |x|. +// Computes the first derivative of GetGainLinear() in `x`. double LimiterDbGainCurve::GetGainFirstDerivativeLinear(double x) const { // Beyond-knee region only. RTC_CHECK_GE(x, limiter_start_linear_ - 1e-7 * kMaxAbsFloatS16Value); diff --git a/modules/audio_processing/agc2/noise_level_estimator.cc b/modules/audio_processing/agc2/noise_level_estimator.cc index 10e8437d3f..9fb1c24b65 100644 --- a/modules/audio_processing/agc2/noise_level_estimator.cc +++ b/modules/audio_processing/agc2/noise_level_estimator.cc @@ -17,18 +17,17 @@ #include #include "api/array_view.h" -#include "common_audio/include/audio_util.h" -#include "modules/audio_processing/agc2/signal_classifier.h" #include "modules/audio_processing/logging/apm_data_dumper.h" #include "rtc_base/checks.h" namespace webrtc { namespace { + constexpr int kFramesPerSecond = 100; float FrameEnergy(const AudioFrameView& audio) { float energy = 0.0f; - for (size_t k = 0; k < audio.num_channels(); ++k) { + for (int k = 0; k < audio.num_channels(); ++k) { float channel_energy = std::accumulate(audio.channel(k).begin(), audio.channel(k).end(), 0.0f, [](float a, float b) -> float { return a + b * b; }); @@ -37,107 +36,15 @@ float FrameEnergy(const AudioFrameView& audio) { return energy; } -float EnergyToDbfs(float signal_energy, size_t num_samples) { - const float rms = std::sqrt(signal_energy / num_samples); - return FloatS16ToDbfs(rms); -} - -class NoiseLevelEstimatorImpl : public NoiseLevelEstimator { - public: - NoiseLevelEstimatorImpl(ApmDataDumper* data_dumper) - : data_dumper_(data_dumper), signal_classifier_(data_dumper) { - // Initially assume that 48 kHz will be used. `Analyze()` will detect the - // used sample rate and call `Initialize()` again if needed. 
- Initialize(/*sample_rate_hz=*/48000); - } - NoiseLevelEstimatorImpl(const NoiseLevelEstimatorImpl&) = delete; - NoiseLevelEstimatorImpl& operator=(const NoiseLevelEstimatorImpl&) = delete; - ~NoiseLevelEstimatorImpl() = default; - - float Analyze(const AudioFrameView& frame) override { - data_dumper_->DumpRaw("agc2_noise_level_estimator_hold_counter", - noise_energy_hold_counter_); - const int sample_rate_hz = - static_cast(frame.samples_per_channel() * kFramesPerSecond); - if (sample_rate_hz != sample_rate_hz_) { - Initialize(sample_rate_hz); - } - const float frame_energy = FrameEnergy(frame); - if (frame_energy <= 0.f) { - RTC_DCHECK_GE(frame_energy, 0.f); - data_dumper_->DumpRaw("agc2_noise_level_estimator_signal_type", -1); - return EnergyToDbfs(noise_energy_, frame.samples_per_channel()); - } - - if (first_update_) { - // Initialize the noise energy to the frame energy. - first_update_ = false; - data_dumper_->DumpRaw("agc2_noise_level_estimator_signal_type", -1); - noise_energy_ = std::max(frame_energy, min_noise_energy_); - return EnergyToDbfs(noise_energy_, frame.samples_per_channel()); - } - - const SignalClassifier::SignalType signal_type = - signal_classifier_.Analyze(frame.channel(0)); - data_dumper_->DumpRaw("agc2_noise_level_estimator_signal_type", - static_cast(signal_type)); - - // Update the noise estimate in a minimum statistics-type manner. - if (signal_type == SignalClassifier::SignalType::kStationary) { - if (frame_energy > noise_energy_) { - // Leak the estimate upwards towards the frame energy if no recent - // downward update. - noise_energy_hold_counter_ = - std::max(noise_energy_hold_counter_ - 1, 0); - - if (noise_energy_hold_counter_ == 0) { - constexpr float kMaxNoiseEnergyFactor = 1.01f; - noise_energy_ = - std::min(noise_energy_ * kMaxNoiseEnergyFactor, frame_energy); - } - } else { - // Update smoothly downwards with a limited maximum update magnitude. - constexpr float kMinNoiseEnergyFactor = 0.9f; - constexpr float kNoiseEnergyDeltaFactor = 0.05f; - noise_energy_ = - std::max(noise_energy_ * kMinNoiseEnergyFactor, - noise_energy_ - kNoiseEnergyDeltaFactor * - (noise_energy_ - frame_energy)); - // Prevent an energy increase for the next 10 seconds. - constexpr int kNumFramesToEnergyIncreaseAllowed = 1000; - noise_energy_hold_counter_ = kNumFramesToEnergyIncreaseAllowed; - } - } else { - // TODO(bugs.webrtc.org/7494): Remove to not forget the estimated level. - // For a non-stationary signal, leak the estimate downwards in order to - // avoid estimate locking due to incorrect signal classification. - noise_energy_ = noise_energy_ * 0.99f; - } - - // Ensure a minimum of the estimate. - noise_energy_ = std::max(noise_energy_, min_noise_energy_); - return EnergyToDbfs(noise_energy_, frame.samples_per_channel()); - } - - private: - void Initialize(int sample_rate_hz) { - sample_rate_hz_ = sample_rate_hz; - noise_energy_ = 1.0f; - first_update_ = true; - // Initialize the minimum noise energy to -84 dBFS. 
- min_noise_energy_ = sample_rate_hz * 2.0f * 2.0f / kFramesPerSecond; - noise_energy_hold_counter_ = 0; - signal_classifier_.Initialize(sample_rate_hz); +float EnergyToDbfs(float signal_energy, int num_samples) { + RTC_DCHECK_GE(signal_energy, 0.0f); + const float rms_square = signal_energy / num_samples; + constexpr float kMinDbfs = -90.30899869919436f; + if (rms_square <= 1.0f) { + return kMinDbfs; } - - ApmDataDumper* const data_dumper_; - int sample_rate_hz_; - float min_noise_energy_; - bool first_update_; - float noise_energy_; - int noise_energy_hold_counter_; - SignalClassifier signal_classifier_; -}; + return 10.0f * std::log10(rms_square) + kMinDbfs; +} // Updates the noise floor with instant decay and slow attack. This tuning is // specific for AGC2, so that (i) it can promptly increase the gain if the noise @@ -186,7 +93,8 @@ class NoiseFloorEstimator : public NoiseLevelEstimator { // Ignore frames when muted or below the minimum measurable energy. data_dumper_->DumpRaw("agc2_noise_floor_estimator_preliminary_level", noise_energy_); - return EnergyToDbfs(noise_energy_, frame.samples_per_channel()); + return EnergyToDbfs(noise_energy_, + static_cast<int>(frame.samples_per_channel())); } if (preliminary_noise_energy_set_) { @@ -220,7 +128,8 @@ class NoiseFloorEstimator : public NoiseLevelEstimator { noise_energy_ = std::min(noise_energy_, preliminary_noise_energy_); counter_--; } - return EnergyToDbfs(noise_energy_, frame.samples_per_channel()); + return EnergyToDbfs(noise_energy_, + static_cast<int>(frame.samples_per_channel())); } private: @@ -247,11 +156,6 @@ } // namespace -std::unique_ptr<NoiseLevelEstimator> CreateStationaryNoiseEstimator( - ApmDataDumper* data_dumper) { - return std::make_unique<NoiseLevelEstimatorImpl>(data_dumper); -} - std::unique_ptr<NoiseLevelEstimator> CreateNoiseFloorEstimator( ApmDataDumper* data_dumper) { return std::make_unique<NoiseFloorEstimator>(data_dumper); diff --git a/modules/audio_processing/agc2/noise_level_estimator.h b/modules/audio_processing/agc2/noise_level_estimator.h index 94aecda7fc..9f3b957486 100644 --- a/modules/audio_processing/agc2/noise_level_estimator.h +++ b/modules/audio_processing/agc2/noise_level_estimator.h @@ -27,10 +27,6 @@ class NoiseLevelEstimator { virtual float Analyze(const AudioFrameView<float>& frame) = 0; }; -// Creates a noise level estimator based on stationarity detection. -std::unique_ptr<NoiseLevelEstimator> CreateStationaryNoiseEstimator( - ApmDataDumper* data_dumper); - // Creates a noise level estimator based on noise floor detection. std::unique_ptr<NoiseLevelEstimator> CreateNoiseFloorEstimator( ApmDataDumper* data_dumper); diff --git a/modules/audio_processing/agc2/noise_level_estimator_unittest.cc b/modules/audio_processing/agc2/noise_level_estimator_unittest.cc index 51ad1ba00a..8168c5a229 100644 --- a/modules/audio_processing/agc2/noise_level_estimator_unittest.cc +++ b/modules/audio_processing/agc2/noise_level_estimator_unittest.cc @@ -50,45 +50,6 @@ class NoiseEstimatorParametrization : public ::testing::TestWithParam<int> { int sample_rate_hz() const { return GetParam(); } }; -// White random noise is stationary, but does not trigger the detector -// every frame due to the randomness.
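// (In EnergyToDbfs() above, kMinDbfs equals -20*log10(32768), the dBFS value
// of a unit RMS in FloatS16 scale: dBFS = 20*log10(rms/32768) =
// 10*log10(rms^2) - 90.30899869919436. Frames with RMS at or below 1 are
// therefore clamped to the -90.309 dBFS floor.)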
-TEST_P(NoiseEstimatorParametrization, StationaryNoiseEstimatorWithRandomNoise) { - ApmDataDumper data_dumper(0); - auto estimator = CreateStationaryNoiseEstimator(&data_dumper); - - test::WhiteNoiseGenerator gen(/*min_amplitude=*/test::kMinS16, - /*max_amplitude=*/test::kMaxS16); - const float noise_level_dbfs = - RunEstimator(gen, *estimator, sample_rate_hz()); - EXPECT_NEAR(noise_level_dbfs, -5.5f, 1.0f); -} - -// Sine curves are (very) stationary. They trigger the detector all -// the time. Except for a few initial frames. -TEST_P(NoiseEstimatorParametrization, StationaryNoiseEstimatorWithSineTone) { - ApmDataDumper data_dumper(0); - auto estimator = CreateStationaryNoiseEstimator(&data_dumper); - - test::SineGenerator gen(/*amplitude=*/test::kMaxS16, /*frequency_hz=*/600.0f, - sample_rate_hz()); - const float noise_level_dbfs = - RunEstimator(gen, *estimator, sample_rate_hz()); - EXPECT_NEAR(noise_level_dbfs, -3.0f, 1.0f); -} - -// Pulses are transient if they are far enough apart. They shouldn't -// trigger the noise detector. -TEST_P(NoiseEstimatorParametrization, StationaryNoiseEstimatorWithPulseTone) { - ApmDataDumper data_dumper(0); - auto estimator = CreateStationaryNoiseEstimator(&data_dumper); - - test::PulseGenerator gen(/*pulse_amplitude=*/test::kMaxS16, - /*no_pulse_amplitude=*/10.0f, /*frequency_hz=*/20.0f, - sample_rate_hz()); - const int noise_level_dbfs = RunEstimator(gen, *estimator, sample_rate_hz()); - EXPECT_NEAR(noise_level_dbfs, -79.0f, 1.0f); -} - // Checks that full scale white noise maps to about -5.5 dBFS. TEST_P(NoiseEstimatorParametrization, NoiseFloorEstimatorWithRandomNoise) { ApmDataDumper data_dumper(0); @@ -122,7 +83,8 @@ TEST_P(NoiseEstimatorParametrization, NoiseFloorEstimatorWithPulseTone) { constexpr float kNoPulseAmplitude = 10.0f; test::PulseGenerator gen(/*pulse_amplitude=*/test::kMaxS16, kNoPulseAmplitude, /*frequency_hz=*/20.0f, sample_rate_hz()); - const int noise_level_dbfs = RunEstimator(gen, *estimator, sample_rate_hz()); + const float noise_level_dbfs = + RunEstimator(gen, *estimator, sample_rate_hz()); const float expected_noise_floor_dbfs = 20.0f * std::log10f(kNoPulseAmplitude / test::kMaxS16); EXPECT_NEAR(noise_level_dbfs, expected_noise_floor_dbfs, 0.5f); diff --git a/modules/audio_processing/agc2/noise_spectrum_estimator.cc b/modules/audio_processing/agc2/noise_spectrum_estimator.cc deleted file mode 100644 index f283f4e27f..0000000000 --- a/modules/audio_processing/agc2/noise_spectrum_estimator.cc +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "modules/audio_processing/agc2/noise_spectrum_estimator.h" - -#include - -#include - -#include "api/array_view.h" -#include "modules/audio_processing/logging/apm_data_dumper.h" -#include "rtc_base/arraysize.h" -#include "rtc_base/checks.h" - -namespace webrtc { -namespace { -constexpr float kMinNoisePower = 100.f; -} // namespace - -NoiseSpectrumEstimator::NoiseSpectrumEstimator(ApmDataDumper* data_dumper) - : data_dumper_(data_dumper) { - Initialize(); -} - -void NoiseSpectrumEstimator::Initialize() { - std::fill(noise_spectrum_, noise_spectrum_ + arraysize(noise_spectrum_), - kMinNoisePower); -} - -void NoiseSpectrumEstimator::Update(rtc::ArrayView spectrum, - bool first_update) { - RTC_DCHECK_EQ(65, spectrum.size()); - - if (first_update) { - // Initialize the noise spectral estimate with the signal spectrum. - std::copy(spectrum.data(), spectrum.data() + spectrum.size(), - noise_spectrum_); - } else { - // Smoothly update the noise spectral estimate towards the signal spectrum - // such that the magnitude of the updates are limited. - for (size_t k = 0; k < spectrum.size(); ++k) { - if (noise_spectrum_[k] < spectrum[k]) { - noise_spectrum_[k] = std::min( - 1.01f * noise_spectrum_[k], - noise_spectrum_[k] + 0.05f * (spectrum[k] - noise_spectrum_[k])); - } else { - noise_spectrum_[k] = std::max( - 0.99f * noise_spectrum_[k], - noise_spectrum_[k] + 0.05f * (spectrum[k] - noise_spectrum_[k])); - } - } - } - - // Ensure that the noise spectal estimate does not become too low. - for (auto& v : noise_spectrum_) { - v = std::max(v, kMinNoisePower); - } - - data_dumper_->DumpRaw("agc2_noise_spectrum", 65, noise_spectrum_); - data_dumper_->DumpRaw("agc2_signal_spectrum", spectrum); -} - -} // namespace webrtc diff --git a/modules/audio_processing/agc2/noise_spectrum_estimator.h b/modules/audio_processing/agc2/noise_spectrum_estimator.h deleted file mode 100644 index e9895f0b1e..0000000000 --- a/modules/audio_processing/agc2/noise_spectrum_estimator.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef MODULES_AUDIO_PROCESSING_AGC2_NOISE_SPECTRUM_ESTIMATOR_H_ -#define MODULES_AUDIO_PROCESSING_AGC2_NOISE_SPECTRUM_ESTIMATOR_H_ - -#include "api/array_view.h" - -namespace webrtc { - -class ApmDataDumper; - -class NoiseSpectrumEstimator { - public: - explicit NoiseSpectrumEstimator(ApmDataDumper* data_dumper); - - NoiseSpectrumEstimator() = delete; - NoiseSpectrumEstimator(const NoiseSpectrumEstimator&) = delete; - NoiseSpectrumEstimator& operator=(const NoiseSpectrumEstimator&) = delete; - - void Initialize(); - void Update(rtc::ArrayView spectrum, bool first_update); - - rtc::ArrayView GetNoiseSpectrum() const { - return rtc::ArrayView(noise_spectrum_); - } - - private: - ApmDataDumper* data_dumper_; - float noise_spectrum_[65]; -}; - -} // namespace webrtc - -#endif // MODULES_AUDIO_PROCESSING_AGC2_NOISE_SPECTRUM_ESTIMATOR_H_ diff --git a/modules/audio_processing/agc2/rnn_vad/BUILD.gn b/modules/audio_processing/agc2/rnn_vad/BUILD.gn index bc848b3e13..f0d7093783 100644 --- a/modules/audio_processing/agc2/rnn_vad/BUILD.gn +++ b/modules/audio_processing/agc2/rnn_vad/BUILD.gn @@ -57,7 +57,7 @@ rtc_source_set("rnn_vad_common") { # TODO(alessiob): Make this target visibility private. visibility = [ ":*", - "..:rnn_vad_with_level", + "..:vad_wrapper", ] sources = [ "common.h" ] deps = [ diff --git a/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc b/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc index 431c01fab3..3ddeec8dba 100644 --- a/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc +++ b/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc @@ -40,7 +40,7 @@ AutoCorrelationCalculator::~AutoCorrelationCalculator() = default; // [ y_{m-1} ] // x and y are sub-array of equal length; x is never moved, whereas y slides. // The cross-correlation between y_0 and x corresponds to the auto-correlation -// for the maximum pitch period. Hence, the first value in |auto_corr| has an +// for the maximum pitch period. Hence, the first value in `auto_corr` has an // inverted lag equal to 0 that corresponds to a lag equal to the maximum // pitch period. void AutoCorrelationCalculator::ComputeOnPitchBuffer( diff --git a/modules/audio_processing/agc2/rnn_vad/auto_correlation.h b/modules/audio_processing/agc2/rnn_vad/auto_correlation.h index d58558ca2e..1ae5054567 100644 --- a/modules/audio_processing/agc2/rnn_vad/auto_correlation.h +++ b/modules/audio_processing/agc2/rnn_vad/auto_correlation.h @@ -31,7 +31,7 @@ class AutoCorrelationCalculator { ~AutoCorrelationCalculator(); // Computes the auto-correlation coefficients for a target pitch interval. - // |auto_corr| indexes are inverted lags. + // `auto_corr` indexes are inverted lags. void ComputeOnPitchBuffer( rtc::ArrayView pitch_buf, rtc::ArrayView auto_corr); diff --git a/modules/audio_processing/agc2/rnn_vad/common.h b/modules/audio_processing/agc2/rnn_vad/common.h index be5a2d58ce..c099373200 100644 --- a/modules/audio_processing/agc2/rnn_vad/common.h +++ b/modules/audio_processing/agc2/rnn_vad/common.h @@ -52,8 +52,8 @@ constexpr int kBufSize12kHz = kBufSize24kHz / 2; constexpr int kInitialMinPitch12kHz = kInitialMinPitch24kHz / 2; constexpr int kMaxPitch12kHz = kMaxPitch24kHz / 2; static_assert(kMaxPitch12kHz > kInitialMinPitch12kHz, ""); -// The inverted lags for the pitch interval [|kInitialMinPitch12kHz|, -// |kMaxPitch12kHz|] are in the range [0, |kNumLags12kHz|]. +// The inverted lags for the pitch interval [`kInitialMinPitch12kHz`, +// `kMaxPitch12kHz`] are in the range [0, `kNumLags12kHz`]. 
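// In other words, inverted lag `i` maps to lag `kMaxPitch12kHz - i`, so the
// first auto-correlation coefficient corresponds to the maximum pitch period
// and larger inverted lags to shorter periods, down towards
// `kInitialMinPitch12kHz`.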
constexpr int kNumLags12kHz = kMaxPitch12kHz - kInitialMinPitch12kHz; // 48 kHz constants. diff --git a/modules/audio_processing/agc2/rnn_vad/features_extraction.cc b/modules/audio_processing/agc2/rnn_vad/features_extraction.cc index f86eba764e..502023428d 100644 --- a/modules/audio_processing/agc2/rnn_vad/features_extraction.cc +++ b/modules/audio_processing/agc2/rnn_vad/features_extraction.cc @@ -19,8 +19,8 @@ namespace webrtc { namespace rnn_vad { namespace { -// Generated via "B, A = scipy.signal.butter(2, 30/12000, btype='highpass')" -const BiQuadFilter::BiQuadCoefficients kHpfConfig24k = { +// Computed as `scipy.signal.butter(N=2, Wn=60/24000, btype='highpass')`. +constexpr BiQuadFilter::Config kHpfConfig24k{ {0.99446179f, -1.98892358f, 0.99446179f}, {-1.98889291f, 0.98895425f}}; @@ -28,6 +28,7 @@ const BiQuadFilter::BiQuadCoefficients kHpfConfig24k = { FeaturesExtractor::FeaturesExtractor(const AvailableCpuFeatures& cpu_features) : use_high_pass_filter_(false), + hpf_(kHpfConfig24k), pitch_buf_24kHz_(), pitch_buf_24kHz_view_(pitch_buf_24kHz_.GetBufferView()), lp_residual_(kBufSize24kHz), @@ -35,7 +36,6 @@ FeaturesExtractor::FeaturesExtractor(const AvailableCpuFeatures& cpu_features) pitch_estimator_(cpu_features), reference_frame_view_(pitch_buf_24kHz_.GetMostRecentValuesView()) { RTC_DCHECK_EQ(kBufSize24kHz, lp_residual_.size()); - hpf_.Initialize(kHpfConfig24k); Reset(); } @@ -44,8 +44,9 @@ FeaturesExtractor::~FeaturesExtractor() = default; void FeaturesExtractor::Reset() { pitch_buf_24kHz_.Reset(); spectral_features_extractor_.Reset(); - if (use_high_pass_filter_) + if (use_high_pass_filter_) { hpf_.Reset(); + } } bool FeaturesExtractor::CheckSilenceComputeFeatures( @@ -55,10 +56,10 @@ bool FeaturesExtractor::CheckSilenceComputeFeatures( if (use_high_pass_filter_) { std::array samples_filtered; hpf_.Process(samples, samples_filtered); - // Feed buffer with the pre-processed version of |samples|. + // Feed buffer with the pre-processed version of `samples`. pitch_buf_24kHz_.Push(samples_filtered); } else { - // Feed buffer with |samples|. + // Feed buffer with `samples`. pitch_buf_24kHz_.Push(samples); } // Extract the LP residual. diff --git a/modules/audio_processing/agc2/rnn_vad/features_extraction.h b/modules/audio_processing/agc2/rnn_vad/features_extraction.h index f4cea7a83d..d47a85bfb0 100644 --- a/modules/audio_processing/agc2/rnn_vad/features_extraction.h +++ b/modules/audio_processing/agc2/rnn_vad/features_extraction.h @@ -33,7 +33,7 @@ class FeaturesExtractor { void Reset(); // Analyzes the samples, computes the feature vector and returns true if // silence is detected (false if not). When silence is detected, - // |feature_vector| is partially written and therefore must not be used to + // `feature_vector` is partially written and therefore must not be used to // feed the VAD RNN. bool CheckSilenceComputeFeatures( rtc::ArrayView samples, diff --git a/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc b/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc index 98da39e38a..96f956adfe 100644 --- a/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc +++ b/modules/audio_processing/agc2/rnn_vad/features_extraction_unittest.cc @@ -29,7 +29,7 @@ constexpr int ceil(int n, int m) { } // Number of 10 ms frames required to fill a pitch buffer having size -// |kBufSize24kHz|. +// `kBufSize24kHz`. constexpr int kNumTestDataFrames = ceil(kBufSize24kHz, kFrameSize10ms24kHz); // Number of samples for the test data. 
constexpr int kNumTestDataSize = kNumTestDataFrames * kFrameSize10ms24kHz; @@ -47,8 +47,8 @@ void CreatePureTone(float amplitude, float freq_hz, rtc::ArrayView dst) { } } -// Feeds |features_extractor| with |samples| splitting it in 10 ms frames. -// For every frame, the output is written into |feature_vector|. Returns true +// Feeds `features_extractor` with `samples` splitting it in 10 ms frames. +// For every frame, the output is written into `feature_vector`. Returns true // if silence is detected in the last frame. bool FeedTestData(FeaturesExtractor& features_extractor, rtc::ArrayView samples, diff --git a/modules/audio_processing/agc2/rnn_vad/lp_residual.cc b/modules/audio_processing/agc2/rnn_vad/lp_residual.cc index c553aa2ad1..484bfba459 100644 --- a/modules/audio_processing/agc2/rnn_vad/lp_residual.cc +++ b/modules/audio_processing/agc2/rnn_vad/lp_residual.cc @@ -22,9 +22,9 @@ namespace webrtc { namespace rnn_vad { namespace { -// Computes auto-correlation coefficients for |x| and writes them in -// |auto_corr|. The lag values are in {0, ..., max_lag - 1}, where max_lag -// equals the size of |auto_corr|. +// Computes auto-correlation coefficients for `x` and writes them in +// `auto_corr`. The lag values are in {0, ..., max_lag - 1}, where max_lag +// equals the size of `auto_corr`. void ComputeAutoCorrelation( rtc::ArrayView x, rtc::ArrayView auto_corr) { diff --git a/modules/audio_processing/agc2/rnn_vad/lp_residual.h b/modules/audio_processing/agc2/rnn_vad/lp_residual.h index 380d9f608b..d04c536ec1 100644 --- a/modules/audio_processing/agc2/rnn_vad/lp_residual.h +++ b/modules/audio_processing/agc2/rnn_vad/lp_residual.h @@ -21,14 +21,14 @@ namespace rnn_vad { // Linear predictive coding (LPC) inverse filter length. constexpr int kNumLpcCoefficients = 5; -// Given a frame |x|, computes a post-processed version of LPC coefficients +// Given a frame `x`, computes a post-processed version of LPC coefficients // tailored for pitch estimation. void ComputeAndPostProcessLpcCoefficients( rtc::ArrayView x, rtc::ArrayView lpc_coeffs); -// Computes the LP residual for the input frame |x| and the LPC coefficients -// |lpc_coeffs|. |y| and |x| can point to the same array for in-place +// Computes the LP residual for the input frame `x` and the LPC coefficients +// `lpc_coeffs`. `y` and `x` can point to the same array for in-place // computation. void ComputeLpResidual( rtc::ArrayView lpc_coeffs, diff --git a/modules/audio_processing/agc2/rnn_vad/pitch_search.cc b/modules/audio_processing/agc2/rnn_vad/pitch_search.cc index 77a118853f..419620fc0c 100644 --- a/modules/audio_processing/agc2/rnn_vad/pitch_search.cc +++ b/modules/audio_processing/agc2/rnn_vad/pitch_search.cc @@ -44,7 +44,7 @@ int PitchEstimator::Estimate( CandidatePitchPeriods pitch_periods = ComputePitchPeriod12kHz( pitch_buffer_12kHz_view, auto_correlation_12kHz_view, cpu_features_); // The refinement is done using the pitch buffer that contains 24 kHz samples. - // Therefore, adapt the inverted lags in |pitch_candidates_inv_lags| from 12 + // Therefore, adapt the inverted lags in `pitch_candidates_inv_lags` from 12 // to 24 kHz. 
pitch_periods.best *= 2; pitch_periods.second_best *= 2; diff --git a/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc b/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc index 0b8a77e488..e8c912518d 100644 --- a/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc +++ b/modules/audio_processing/agc2/rnn_vad/pitch_search_internal.cc @@ -54,18 +54,18 @@ int GetPitchPseudoInterpolationOffset(float prev_auto_correlation, float next_auto_correlation) { if ((next_auto_correlation - prev_auto_correlation) > 0.7f * (curr_auto_correlation - prev_auto_correlation)) { - return 1; // |next_auto_correlation| is the largest auto-correlation + return 1; // `next_auto_correlation` is the largest auto-correlation // coefficient. } else if ((prev_auto_correlation - next_auto_correlation) > 0.7f * (curr_auto_correlation - next_auto_correlation)) { - return -1; // |prev_auto_correlation| is the largest auto-correlation + return -1; // `prev_auto_correlation` is the largest auto-correlation // coefficient. } return 0; } -// Refines a pitch period |lag| encoded as lag with pseudo-interpolation. The -// output sample rate is twice as that of |lag|. +// Refines a pitch period `lag` encoded as lag with pseudo-interpolation. The +// output sample rate is twice as that of `lag`. int PitchPseudoInterpolationLagPitchBuf( int lag, rtc::ArrayView pitch_buffer, @@ -217,8 +217,8 @@ int ComputePitchPeriod48kHz( auto_correlation[best_inverted_lag + 1], auto_correlation[best_inverted_lag], auto_correlation[best_inverted_lag - 1]); - // TODO(bugs.webrtc.org/9076): When retraining, check if |offset| below should - // be subtracted since |inverted_lag| is an inverted lag but offset is a lag. + // TODO(bugs.webrtc.org/9076): When retraining, check if `offset` below should + // be subtracted since `inverted_lag` is an inverted lag but offset is a lag. return 2 * best_inverted_lag + offset; } @@ -250,24 +250,24 @@ bool IsAlternativePitchStrongerThanInitial(PitchInfo last, RTC_DCHECK_GE(initial.period, 0); RTC_DCHECK_GE(alternative.period, 0); RTC_DCHECK_GE(period_divisor, 2); - // Compute a term that lowers the threshold when |alternative.period| is close - // to the last estimated period |last.period| - i.e., pitch tracking. + // Compute a term that lowers the threshold when `alternative.period` is close + // to the last estimated period `last.period` - i.e., pitch tracking. float lower_threshold_term = 0.f; if (std::abs(alternative.period - last.period) <= 1) { // The candidate pitch period is within 1 sample from the last one. - // Make the candidate at |alternative.period| very easy to be accepted. + // Make the candidate at `alternative.period` very easy to be accepted. lower_threshold_term = last.strength; } else if (std::abs(alternative.period - last.period) == 2 && initial.period > kInitialPitchPeriodThresholds[period_divisor - 2]) { // The candidate pitch period is 2 samples far from the last one and the - // period |initial.period| (from which |alternative.period| has been - // derived) is greater than a threshold. Make |alternative.period| easy to + // period `initial.period` (from which `alternative.period` has been + // derived) is greater than a threshold. Make `alternative.period` easy to // be accepted. lower_threshold_term = 0.5f * last.strength; } // Set the threshold based on the strength of the initial estimate - // |initial.period|. Also reduce the chance of false positives caused by a + // `initial.period`. 
Also reduce the chance of false positives caused by a // bias towards high frequencies (originating from short-term correlations). float threshold = std::max(0.3f, 0.7f * initial.strength - lower_threshold_term); @@ -359,7 +359,7 @@ CandidatePitchPeriods ComputePitchPeriod12kHz( } } } - // Update |squared_energy_y| for the next inverted lag. + // Update `squared_energy_y` for the next inverted lag. const float y_old = pitch_buffer[inverted_lag]; const float y_new = pitch_buffer[inverted_lag + kFrameSize20ms12kHz]; denominator -= y_old * y_old; @@ -457,9 +457,9 @@ PitchInfo ComputeExtendedPitchPeriod48kHz( alternative_pitch.period = GetAlternativePitchPeriod( initial_pitch.period, /*multiplier=*/1, period_divisor); RTC_DCHECK_GE(alternative_pitch.period, kMinPitch24kHz); - // When looking at |alternative_pitch.period|, we also look at one of its - // sub-harmonics. |kSubHarmonicMultipliers| is used to know where to look. - // |period_divisor| == 2 is a special case since |dual_alternative_period| + // When looking at `alternative_pitch.period`, we also look at one of its + // sub-harmonics. `kSubHarmonicMultipliers` is used to know where to look. + // `period_divisor` == 2 is a special case since `dual_alternative_period` // might be greater than the maximum pitch period. int dual_alternative_period = GetAlternativePitchPeriod( initial_pitch.period, kSubHarmonicMultipliers[period_divisor - 2], @@ -472,8 +472,8 @@ PitchInfo ComputeExtendedPitchPeriod48kHz( << "The lower pitch period and the additional sub-harmonic must not " "coincide."; // Compute an auto-correlation score for the primary pitch candidate - // |alternative_pitch.period| by also looking at its possible sub-harmonic - // |dual_alternative_period|. + // `alternative_pitch.period` by also looking at its possible sub-harmonic + // `dual_alternative_period`. const float xy_primary_period = ComputeAutoCorrelation( kMaxPitch24kHz - alternative_pitch.period, pitch_buffer, vector_math); // TODO(webrtc:10480): Copy `xy_primary_period` if the secondary period is diff --git a/modules/audio_processing/agc2/rnn_vad/ring_buffer.h b/modules/audio_processing/agc2/rnn_vad/ring_buffer.h index f0270af918..a6f7fdd1a6 100644 --- a/modules/audio_processing/agc2/rnn_vad/ring_buffer.h +++ b/modules/audio_processing/agc2/rnn_vad/ring_buffer.h @@ -35,7 +35,7 @@ class RingBuffer { ~RingBuffer() = default; // Set the ring buffer values to zero. void Reset() { buffer_.fill(0); } - // Replace the least recently pushed array in the buffer with |new_values|. + // Replace the least recently pushed array in the buffer with `new_values`. void Push(rtc::ArrayView new_values) { std::memcpy(buffer_.data() + S * tail_, new_values.data(), S * sizeof(T)); tail_ += 1; @@ -43,7 +43,7 @@ class RingBuffer { tail_ = 0; } // Return an array view onto the array with a given delay. A view on the last - // and least recently push array is returned when |delay| is 0 and N - 1 + // and least recently push array is returned when `delay` is 0 and N - 1 // respectively. rtc::ArrayView GetArrayView(int delay) const { RTC_DCHECK_LE(0, delay); diff --git a/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc b/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc index ecbb198c96..91501fb6e3 100644 --- a/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc +++ b/modules/audio_processing/agc2/rnn_vad/rnn_fc.cc @@ -32,7 +32,7 @@ std::vector GetScaledParams(rtc::ArrayView params) { // TODO(bugs.chromium.org/10480): Hard-code optimized layout and remove this // function to improve setup time. 
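As an aside on the `RingBuffer` comments touched above: the buffer keeps the N most recently pushed arrays of S values, and `GetArrayView(delay)` addresses them relative to the latest `Push()`. A small usage sketch, assuming the template reads `RingBuffer<T, S, N>` as the `Push()`/`GetArrayView()` code in the hunk suggests:

#include <array>

#include "modules/audio_processing/agc2/rnn_vad/ring_buffer.h"

void RingBufferExample() {
  // Keeps the N = 3 most recent arrays of S = 2 floats.
  webrtc::rnn_vad::RingBuffer<float, /*S=*/2, /*N=*/3> ring;
  std::array<float, 2> first{1.0f, 2.0f};
  std::array<float, 2> second{3.0f, 4.0f};
  ring.Push(first);
  ring.Push(second);
  // delay = 0 addresses the most recently pushed array; delay = N - 1
  // addresses the oldest one still kept.
  auto newest = ring.GetArrayView(/*delay=*/0);  // Views `second`.
  auto oldest = ring.GetArrayView(/*delay=*/1);  // Views `first`.
  static_cast<void>(newest);
  static_cast<void>(oldest);
}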
-// Casts and scales |weights| and re-arranges the layout.
+// Casts and scales `weights` and re-arranges the layout.
 std::vector<float> PreprocessWeights(rtc::ArrayView<const int8_t> weights,
                                      int output_size) {
   if (output_size == 1) {
diff --git a/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc b/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc
index 482016e8d3..ef37410caa 100644
--- a/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc
+++ b/modules/audio_processing/agc2/rnn_vad/rnn_gru.cc
@@ -24,7 +24,7 @@ constexpr int kNumGruGates = 3;  // Update, reset, output.
 std::vector<float> PreprocessGruTensor(rtc::ArrayView<const int8_t> tensor_src,
                                        int output_size) {
   // Transpose, cast and scale.
-  // |n| is the size of the first dimension of the 3-dim tensor |weights|.
+  // `n` is the size of the first dimension of the 3-dim tensor `weights`.
   const int n = rtc::CheckedDivExact(rtc::dchecked_cast<int>(tensor_src.size()),
                                      output_size * kNumGruGates);
   const int stride_src = kNumGruGates * output_size;
diff --git a/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc b/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc
index 989b235705..f33cd14a8a 100644
--- a/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc
+++ b/modules/audio_processing/agc2/rnn_vad/rnn_vad_unittest.cc
@@ -49,7 +49,7 @@ void DumpPerfStats(int num_samples,
 // constant below to true in order to write new expected output binary files.
 constexpr bool kWriteComputedOutputToFile = false;
-// Avoids that one forgets to set |kWriteComputedOutputToFile| back to false
+// Guards against forgetting to set `kWriteComputedOutputToFile` back to false
 // when the expected output files are re-exported.
 TEST(RnnVadTest, CheckWriteComputedOutputIsFalse) {
   ASSERT_FALSE(kWriteComputedOutputToFile)
diff --git a/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc b/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc
index f577571b09..af005833c1 100644
--- a/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc
+++ b/modules/audio_processing/agc2/rnn_vad/sequence_buffer_unittest.cc
@@ -50,7 +50,7 @@ void TestSequenceBufferPushOp() {
   for (int i = 0; i < N; ++i)
     chunk[i] = static_cast<T>(i + 1);
   seq_buf.Push(chunk);
-  // With the next Push(), |last| will be moved left by N positions.
+  // With the next Push(), `last` will be moved left by N positions.
   const T last = chunk[N - 1];
   for (int i = 0; i < N; ++i)
     chunk[i] = static_cast<T>(last + i + 1);
diff --git a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc
index 91c0086fc4..a10b0f7ec9 100644
--- a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc
+++ b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.cc
@@ -23,7 +23,7 @@ namespace {
 // Weights for each FFT coefficient for each Opus band (Nyquist frequency
 // excluded). The size of each band is specified in
-// |kOpusScaleNumBins24kHz20ms|.
+// `kOpusScaleNumBins24kHz20ms`.
 constexpr std::array kOpusBandWeights24kHz20ms = {{
     0.f, 0.25f, 0.5f, 0.75f,  // Band 0
diff --git a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h
index aa7b1c6a47..f4b293a567 100644
--- a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h
+++ b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal.h
@@ -50,8 +50,8 @@ class SpectralCorrelator {
   ~SpectralCorrelator();
   // Computes the band-wise spectral auto-correlations.
-  // |x| must:
-  // - have size equal to |kFrameSize20ms24kHz|;
+  // `x` must:
+  // - have size equal to `kFrameSize20ms24kHz`;
   // - be encoded as vectors of interleaved real-complex FFT coefficients
   //   where x[1] = y[1] = 0 (the Nyquist frequency coefficient is omitted).
   void ComputeAutoCorrelation(
@@ -59,8 +59,8 @@ class SpectralCorrelator {
       rtc::ArrayView auto_corr) const;
   // Computes the band-wise spectral cross-correlations.
-  // |x| and |y| must:
-  // - have size equal to |kFrameSize20ms24kHz|;
+  // `x` and `y` must:
+  // - have size equal to `kFrameSize20ms24kHz`;
   // - be encoded as vectors of interleaved real-complex FFT coefficients where
   //   x[1] = y[1] = 0 (the Nyquist frequency coefficient is omitted).
   void ComputeCrossCorrelation(
@@ -82,12 +82,12 @@ void ComputeSmoothedLogMagnitudeSpectrum(
 // TODO(bugs.webrtc.org/10480): Move to anonymous namespace in
 // spectral_features.cc. Creates a DCT table for arrays having size equal to
-// |kNumBands|. Declared here for unit testing.
+// `kNumBands`. Declared here for unit testing.
 std::array<float, kNumBands * kNumBands> ComputeDctTable();
 // TODO(bugs.webrtc.org/10480): Move to anonymous namespace in
-// spectral_features.cc. Computes DCT for |in| given a pre-computed DCT table.
-// In-place computation is not allowed and |out| can be smaller than |in| in
+// spectral_features.cc. Computes DCT for `in` given a pre-computed DCT table.
+// In-place computation is not allowed and `out` can be smaller than `in` in
 // order to only compute the first DCT coefficients. Declared here for unit
 // testing.
 void ComputeDct(rtc::ArrayView<const float> in,
diff --git a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc
index 11a44a57da..ece4eb5024 100644
--- a/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc
+++ b/modules/audio_processing/agc2/rnn_vad/spectral_features_internal_unittest.cc
@@ -28,7 +28,7 @@ namespace webrtc {
 namespace rnn_vad {
 namespace {
-// Generates the values for the array named |kOpusBandWeights24kHz20ms| in the
+// Generates the values for the array named `kOpusBandWeights24kHz20ms` in the
 // anonymous namespace of the .cc file, which is the array of FFT coefficient
 // weights for the Opus scale triangular filters.
 std::vector ComputeTriangularFiltersWeights() {
@@ -66,7 +66,7 @@ TEST(RnnVadTest, TestOpusScaleBoundaries) {
 // Checks that the computed triangular filters weights for the Opus scale are
 // monotonic within each Opus band. This test should only be enabled when
-// ComputeTriangularFiltersWeights() is changed and |kOpusBandWeights24kHz20ms|
+// ComputeTriangularFiltersWeights() is changed and `kOpusBandWeights24kHz20ms`
 // is updated accordingly.
 TEST(RnnVadTest, DISABLED_TestOpusScaleWeights) {
   auto weights = ComputeTriangularFiltersWeights();
diff --git a/modules/audio_processing/agc2/rnn_vad/spectral_features_unittest.cc b/modules/audio_processing/agc2/rnn_vad/spectral_features_unittest.cc
index 9f41e96e5e..324d694957 100644
--- a/modules/audio_processing/agc2/rnn_vad/spectral_features_unittest.cc
+++ b/modules/audio_processing/agc2/rnn_vad/spectral_features_unittest.cc
@@ -118,12 +118,11 @@ TEST(RnnVadTest, CepstralFeaturesConstantAverageZeroDerivative) {
   std::array samples;
   rtc::ArrayView<float> samples_view(samples);
   WriteTestData(samples);
-  bool is_silence;
   // Fill the spectral features with test data.
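`ComputeDctTable()` above is exposed only so the unit tests can inspect the table. As an illustration of what such a table holds, here is a generic DCT-II basis-table sketch; the `kNumBands` value and the orthonormal scaling are assumptions for illustration, not necessarily the exact scaling used in spectral_features.cc:

#include <array>
#include <cmath>

constexpr int kNumBands = 22;  // Assumed band count, for illustration only.

// Sketch: entry (i, j) holds a DCT-II basis value, i.e. cos((i + 0.5) * j *
// pi / kNumBands) with an orthonormal scale. A ComputeDct()-style function
// would then take dot products of `in` with the table columns.
std::array<float, kNumBands * kNumBands> MakeDctTable() {
  std::array<float, kNumBands * kNumBands> table{};
  const double pi = std::acos(-1.0);
  for (int i = 0; i < kNumBands; ++i) {
    for (int j = 0; j < kNumBands; ++j) {
      const double scale =
          (j == 0 ? std::sqrt(0.5) : 1.0) * std::sqrt(2.0 / kNumBands);
      table[i * kNumBands + j] =
          static_cast<float>(scale * std::cos((i + 0.5) * j * pi / kNumBands));
    }
  }
  return table;
}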
std::array feature_vector; for (int i = 0; i < kCepstralCoeffsHistorySize; ++i) { - is_silence = sfe.CheckSilenceComputeFeatures( + sfe.CheckSilenceComputeFeatures( samples_view, samples_view, GetHigherBandsSpectrum(&feature_vector), GetAverage(&feature_vector), GetFirstDerivative(&feature_vector), GetSecondDerivative(&feature_vector), @@ -133,7 +132,7 @@ TEST(RnnVadTest, CepstralFeaturesConstantAverageZeroDerivative) { // Feed the test data one last time but using a different output vector. std::array feature_vector_last; - is_silence = sfe.CheckSilenceComputeFeatures( + sfe.CheckSilenceComputeFeatures( samples_view, samples_view, GetHigherBandsSpectrum(&feature_vector_last), GetAverage(&feature_vector_last), GetFirstDerivative(&feature_vector_last), diff --git a/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h b/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h index dd3b62a1a3..d186479551 100644 --- a/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h +++ b/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h @@ -46,9 +46,9 @@ class SymmetricMatrixBuffer { buf_.fill(0); } // Pushes the results from the comparison between the most recent item and - // those that are still in the ring buffer. The first element in |values| must + // those that are still in the ring buffer. The first element in `values` must // correspond to the comparison between the most recent item and the second - // most recent one in the ring buffer, whereas the last element in |values| + // most recent one in the ring buffer, whereas the last element in `values` // must correspond to the comparison between the most recent item and the // oldest one in the ring buffer. void Push(rtc::ArrayView values) { @@ -64,7 +64,7 @@ class SymmetricMatrixBuffer { } } // Reads the value that corresponds to comparison of two items in the ring - // buffer having delay |delay1| and |delay2|. The two arguments must not be + // buffer having delay `delay1` and `delay2`. The two arguments must not be // equal and both must be in {0, ..., S - 1}. T GetValue(int delay1, int delay2) const { int row = S - 1 - delay1; diff --git a/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc b/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc index 6f61c87104..1509ca5ac1 100644 --- a/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc +++ b/modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer_unittest.cc @@ -58,17 +58,17 @@ TEST(RnnVadTest, SymmetricMatrixBufferUseCase) { SCOPED_TRACE(t); const int t_removed = ring_buf.GetArrayView(kRingBufSize - 1)[0]; ring_buf.Push({&t, 1}); - // The head of the ring buffer is |t|. + // The head of the ring buffer is `t`. ASSERT_EQ(t, ring_buf.GetArrayView(0)[0]); - // Create the comparisons between |t| and the older elements in the ring + // Create the comparisons between `t` and the older elements in the ring // buffer. std::array new_comparions; for (int i = 0; i < kRingBufSize - 1; ++i) { - // Start comparing |t| to the second newest element in the ring buffer. + // Start comparing `t` to the second newest element in the ring buffer. const int delay = i + 1; const auto t_prev = ring_buf.GetArrayView(delay)[0]; ASSERT_EQ(std::max(0, t - delay), t_prev); - // Compare the last element |t| with |t_prev|. + // Compare the last element `t` with `t_prev`. 
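As an aside on the `SymmetricMatrixBuffer` API exercised in this test: it stores, for the newest ring-buffer item, one comparison value per older item, and those values can later be read back by delay pair. A usage sketch, assuming the `SymmetricMatrixBuffer<T, S>` signature implied by the declarations above:

#include <array>

#include "modules/audio_processing/agc2/rnn_vad/symmetric_matrix_buffer.h"

void SymmetricMatrixBufferExample() {
  constexpr int kSize = 4;  // S: size of the ring buffer this mirrors.
  webrtc::rnn_vad::SymmetricMatrixBuffer<float, kSize> buf;
  // One comparison per older item: values[0] compares the newest item with
  // the second newest, values[kSize - 2] with the oldest.
  std::array<float, kSize - 1> values{0.9f, 0.5f, 0.1f};
  buf.Push(values);
  // Later, read back the comparison between the items at delays 0 and 2.
  const float score = buf.GetValue(/*delay1=*/0, /*delay2=*/2);
  static_cast<void>(score);
}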
new_comparions[i].first = t_prev; new_comparions[i].second = t; } diff --git a/modules/audio_processing/agc2/saturation_protector.cc b/modules/audio_processing/agc2/saturation_protector.cc index d6f21ef891..961baf4cd3 100644 --- a/modules/audio_processing/agc2/saturation_protector.cc +++ b/modules/audio_processing/agc2/saturation_protector.cc @@ -95,12 +95,10 @@ void UpdateSaturationProtectorState(float peak_dbfs, class SaturationProtectorImpl : public SaturationProtector { public: explicit SaturationProtectorImpl(float initial_headroom_db, - float extra_headroom_db, int adjacent_speech_frames_threshold, ApmDataDumper* apm_data_dumper) : apm_data_dumper_(apm_data_dumper), initial_headroom_db_(initial_headroom_db), - extra_headroom_db_(extra_headroom_db), adjacent_speech_frames_threshold_(adjacent_speech_frames_threshold) { Reset(); } @@ -140,7 +138,7 @@ class SaturationProtectorImpl : public SaturationProtector { if (num_adjacent_speech_frames_ >= adjacent_speech_frames_threshold_) { // `preliminary_state_` is now reliable. Update the headroom. - headroom_db_ = preliminary_state_.headroom_db + extra_headroom_db_; + headroom_db_ = preliminary_state_.headroom_db; } } DumpDebugData(); @@ -148,7 +146,7 @@ class SaturationProtectorImpl : public SaturationProtector { void Reset() override { num_adjacent_speech_frames_ = 0; - headroom_db_ = initial_headroom_db_ + extra_headroom_db_; + headroom_db_ = initial_headroom_db_; ResetSaturationProtectorState(initial_headroom_db_, preliminary_state_); ResetSaturationProtectorState(initial_headroom_db_, reliable_state_); } @@ -165,7 +163,6 @@ class SaturationProtectorImpl : public SaturationProtector { ApmDataDumper* const apm_data_dumper_; const float initial_headroom_db_; - const float extra_headroom_db_; const int adjacent_speech_frames_threshold_; int num_adjacent_speech_frames_; float headroom_db_; @@ -177,12 +174,10 @@ class SaturationProtectorImpl : public SaturationProtector { std::unique_ptr CreateSaturationProtector( float initial_headroom_db, - float extra_headroom_db, int adjacent_speech_frames_threshold, ApmDataDumper* apm_data_dumper) { return std::make_unique( - initial_headroom_db, extra_headroom_db, adjacent_speech_frames_threshold, - apm_data_dumper); + initial_headroom_db, adjacent_speech_frames_threshold, apm_data_dumper); } } // namespace webrtc diff --git a/modules/audio_processing/agc2/saturation_protector.h b/modules/audio_processing/agc2/saturation_protector.h index 0c384f1fa0..ef22145d5f 100644 --- a/modules/audio_processing/agc2/saturation_protector.h +++ b/modules/audio_processing/agc2/saturation_protector.h @@ -38,7 +38,6 @@ class SaturationProtector { // Creates a saturation protector that starts at `initial_headroom_db`. 
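Before the factory declaration continues below, a short sketch of how the simplified API is driven now that `extra_headroom_db` is gone (the numeric values are illustrative; the `Analyze()` and `HeadroomDb()` calls follow the tests further down):

#include <memory>

#include "modules/audio_processing/agc2/saturation_protector.h"
#include "modules/audio_processing/logging/apm_data_dumper.h"

void SaturationProtectorExample() {
  webrtc::ApmDataDumper apm_data_dumper(0);
  auto protector = webrtc::CreateSaturationProtector(
      /*initial_headroom_db=*/20.0f,
      /*adjacent_speech_frames_threshold=*/12, &apm_data_dumper);
  // Feed one analyzed frame: speech probability, speech peak and speech
  // level (dBFS). After enough adjacent speech frames, HeadroomDb() tracks
  // the estimated crest factor directly, with no extra headroom added.
  protector->Analyze(/*speech_probability=*/1.0f, /*peak_dbfs=*/-5.0f,
                     /*speech_level_dbfs=*/-20.0f);
  const float headroom_db = protector->HeadroomDb();
  static_cast<void>(headroom_db);
}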
std::unique_ptr CreateSaturationProtector( float initial_headroom_db, - float extra_headroom_db, int adjacent_speech_frames_threshold, ApmDataDumper* apm_data_dumper); diff --git a/modules/audio_processing/agc2/saturation_protector_unittest.cc b/modules/audio_processing/agc2/saturation_protector_unittest.cc index dc16dc254c..3b104be8cd 100644 --- a/modules/audio_processing/agc2/saturation_protector_unittest.cc +++ b/modules/audio_processing/agc2/saturation_protector_unittest.cc @@ -18,7 +18,6 @@ namespace webrtc { namespace { constexpr float kInitialHeadroomDb = 20.0f; -constexpr float kNoExtraHeadroomDb = 0.0f; constexpr int kNoAdjacentSpeechFramesRequired = 1; constexpr float kMaxSpeechProbability = 1.0f; @@ -47,8 +46,7 @@ float RunOnConstantLevel(int num_iterations, TEST(GainController2SaturationProtector, Reset) { ApmDataDumper apm_data_dumper(0); auto saturation_protector = CreateSaturationProtector( - kInitialHeadroomDb, kNoExtraHeadroomDb, kNoAdjacentSpeechFramesRequired, - &apm_data_dumper); + kInitialHeadroomDb, kNoAdjacentSpeechFramesRequired, &apm_data_dumper); const float initial_headroom_db = saturation_protector->HeadroomDb(); RunOnConstantLevel(/*num_iterations=*/10, kMaxSpeechProbability, /*peak_dbfs=*/0.0f, @@ -71,43 +69,13 @@ TEST(GainController2SaturationProtector, EstimatesCrestRatio) { ApmDataDumper apm_data_dumper(0); auto saturation_protector = CreateSaturationProtector( - kInitialHeadroomDb, kNoExtraHeadroomDb, kNoAdjacentSpeechFramesRequired, - &apm_data_dumper); + kInitialHeadroomDb, kNoAdjacentSpeechFramesRequired, &apm_data_dumper); RunOnConstantLevel(kNumIterations, kMaxSpeechProbability, kPeakLevelDbfs, kSpeechLevelDbfs, *saturation_protector); EXPECT_NEAR(saturation_protector->HeadroomDb(), kCrestFactorDb, kMaxDifferenceDb); } -// Checks that the extra headroom is applied. -TEST(GainController2SaturationProtector, ExtraHeadroomApplied) { - constexpr float kExtraHeadroomDb = 5.1234f; - constexpr int kNumIterations = 10; - constexpr float kPeakLevelDbfs = -20.0f; - constexpr float kSpeechLevelDbfs = kPeakLevelDbfs - 15.0f; - - ApmDataDumper apm_data_dumper(0); - - auto saturation_protector_no_extra = CreateSaturationProtector( - kInitialHeadroomDb, kNoExtraHeadroomDb, kNoAdjacentSpeechFramesRequired, - &apm_data_dumper); - for (int i = 0; i < kNumIterations; ++i) { - saturation_protector_no_extra->Analyze(kMaxSpeechProbability, - kPeakLevelDbfs, kSpeechLevelDbfs); - } - - auto saturation_protector_extra = CreateSaturationProtector( - kInitialHeadroomDb, kExtraHeadroomDb, kNoAdjacentSpeechFramesRequired, - &apm_data_dumper); - for (int i = 0; i < kNumIterations; ++i) { - saturation_protector_extra->Analyze(kMaxSpeechProbability, kPeakLevelDbfs, - kSpeechLevelDbfs); - } - - EXPECT_EQ(saturation_protector_no_extra->HeadroomDb() + kExtraHeadroomDb, - saturation_protector_extra->HeadroomDb()); -} - // Checks that the headroom does not change too quickly. 
TEST(GainController2SaturationProtector, ChangeSlowly) { constexpr int kNumIterations = 1000; @@ -119,8 +87,7 @@ TEST(GainController2SaturationProtector, ChangeSlowly) { ApmDataDumper apm_data_dumper(0); auto saturation_protector = CreateSaturationProtector( - kInitialHeadroomDb, kNoExtraHeadroomDb, kNoAdjacentSpeechFramesRequired, - &apm_data_dumper); + kInitialHeadroomDb, kNoAdjacentSpeechFramesRequired, &apm_data_dumper); float max_difference_db = RunOnConstantLevel(kNumIterations, kMaxSpeechProbability, kPeakLevelDbfs, kSpeechLevelDbfs, *saturation_protector); @@ -142,8 +109,7 @@ class SaturationProtectorParametrization TEST_P(SaturationProtectorParametrization, DoNotAdaptToShortSpeechSegments) { ApmDataDumper apm_data_dumper(0); auto saturation_protector = CreateSaturationProtector( - kInitialHeadroomDb, kNoExtraHeadroomDb, - adjacent_speech_frames_threshold(), &apm_data_dumper); + kInitialHeadroomDb, adjacent_speech_frames_threshold(), &apm_data_dumper); const float initial_headroom_db = saturation_protector->HeadroomDb(); RunOnConstantLevel(/*num_iterations=*/adjacent_speech_frames_threshold() - 1, kMaxSpeechProbability, @@ -156,8 +122,7 @@ TEST_P(SaturationProtectorParametrization, DoNotAdaptToShortSpeechSegments) { TEST_P(SaturationProtectorParametrization, AdaptToEnoughSpeechSegments) { ApmDataDumper apm_data_dumper(0); auto saturation_protector = CreateSaturationProtector( - kInitialHeadroomDb, kNoExtraHeadroomDb, - adjacent_speech_frames_threshold(), &apm_data_dumper); + kInitialHeadroomDb, adjacent_speech_frames_threshold(), &apm_data_dumper); const float initial_headroom_db = saturation_protector->HeadroomDb(); RunOnConstantLevel(/*num_iterations=*/adjacent_speech_frames_threshold() + 1, kMaxSpeechProbability, diff --git a/modules/audio_processing/agc2/signal_classifier.cc b/modules/audio_processing/agc2/signal_classifier.cc deleted file mode 100644 index 3ef8dd775b..0000000000 --- a/modules/audio_processing/agc2/signal_classifier.cc +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "modules/audio_processing/agc2/signal_classifier.h" - -#include -#include -#include - -#include "api/array_view.h" -#include "modules/audio_processing/agc2/down_sampler.h" -#include "modules/audio_processing/agc2/noise_spectrum_estimator.h" -#include "modules/audio_processing/logging/apm_data_dumper.h" -#include "rtc_base/checks.h" -#include "system_wrappers/include/cpu_features_wrapper.h" - -namespace webrtc { -namespace { - -bool IsSse2Available() { -#if defined(WEBRTC_ARCH_X86_FAMILY) - return GetCPUInfo(kSSE2) != 0; -#else - return false; -#endif -} - -void RemoveDcLevel(rtc::ArrayView x) { - RTC_DCHECK_LT(0, x.size()); - float mean = std::accumulate(x.data(), x.data() + x.size(), 0.f); - mean /= x.size(); - - for (float& v : x) { - v -= mean; - } -} - -void PowerSpectrum(const OouraFft* ooura_fft, - rtc::ArrayView x, - rtc::ArrayView spectrum) { - RTC_DCHECK_EQ(65, spectrum.size()); - RTC_DCHECK_EQ(128, x.size()); - float X[128]; - std::copy(x.data(), x.data() + x.size(), X); - ooura_fft->Fft(X); - - float* X_p = X; - RTC_DCHECK_EQ(X_p, &X[0]); - spectrum[0] = (*X_p) * (*X_p); - ++X_p; - RTC_DCHECK_EQ(X_p, &X[1]); - spectrum[64] = (*X_p) * (*X_p); - for (int k = 1; k < 64; ++k) { - ++X_p; - RTC_DCHECK_EQ(X_p, &X[2 * k]); - spectrum[k] = (*X_p) * (*X_p); - ++X_p; - RTC_DCHECK_EQ(X_p, &X[2 * k + 1]); - spectrum[k] += (*X_p) * (*X_p); - } -} - -webrtc::SignalClassifier::SignalType ClassifySignal( - rtc::ArrayView signal_spectrum, - rtc::ArrayView noise_spectrum, - ApmDataDumper* data_dumper) { - int num_stationary_bands = 0; - int num_highly_nonstationary_bands = 0; - - // Detect stationary and highly nonstationary bands. - for (size_t k = 1; k < 40; k++) { - if (signal_spectrum[k] < 3 * noise_spectrum[k] && - signal_spectrum[k] * 3 > noise_spectrum[k]) { - ++num_stationary_bands; - } else if (signal_spectrum[k] > 9 * noise_spectrum[k]) { - ++num_highly_nonstationary_bands; - } - } - - data_dumper->DumpRaw("agc2_num_stationary_bands", 1, &num_stationary_bands); - data_dumper->DumpRaw("agc2_num_highly_nonstationary_bands", 1, - &num_highly_nonstationary_bands); - - // Use the detected number of bands to classify the overall signal - // stationarity. 
- if (num_stationary_bands > 15) { - return SignalClassifier::SignalType::kStationary; - } else { - return SignalClassifier::SignalType::kNonStationary; - } -} - -} // namespace - -SignalClassifier::FrameExtender::FrameExtender(size_t frame_size, - size_t extended_frame_size) - : x_old_(extended_frame_size - frame_size, 0.f) {} - -SignalClassifier::FrameExtender::~FrameExtender() = default; - -void SignalClassifier::FrameExtender::ExtendFrame( - rtc::ArrayView x, - rtc::ArrayView x_extended) { - RTC_DCHECK_EQ(x_old_.size() + x.size(), x_extended.size()); - std::copy(x_old_.data(), x_old_.data() + x_old_.size(), x_extended.data()); - std::copy(x.data(), x.data() + x.size(), x_extended.data() + x_old_.size()); - std::copy(x_extended.data() + x_extended.size() - x_old_.size(), - x_extended.data() + x_extended.size(), x_old_.data()); -} - -SignalClassifier::SignalClassifier(ApmDataDumper* data_dumper) - : data_dumper_(data_dumper), - down_sampler_(data_dumper_), - noise_spectrum_estimator_(data_dumper_), - ooura_fft_(IsSse2Available()) { - Initialize(48000); -} -SignalClassifier::~SignalClassifier() {} - -void SignalClassifier::Initialize(int sample_rate_hz) { - down_sampler_.Initialize(sample_rate_hz); - noise_spectrum_estimator_.Initialize(); - frame_extender_.reset(new FrameExtender(80, 128)); - sample_rate_hz_ = sample_rate_hz; - initialization_frames_left_ = 2; - consistent_classification_counter_ = 3; - last_signal_type_ = SignalClassifier::SignalType::kNonStationary; -} - -SignalClassifier::SignalType SignalClassifier::Analyze( - rtc::ArrayView signal) { - RTC_DCHECK_EQ(signal.size(), sample_rate_hz_ / 100); - - // Compute the signal power spectrum. - float downsampled_frame[80]; - down_sampler_.DownSample(signal, downsampled_frame); - float extended_frame[128]; - frame_extender_->ExtendFrame(downsampled_frame, extended_frame); - RemoveDcLevel(extended_frame); - float signal_spectrum[65]; - PowerSpectrum(&ooura_fft_, extended_frame, signal_spectrum); - - // Classify the signal based on the estimate of the noise spectrum and the - // signal spectrum estimate. - const SignalType signal_type = ClassifySignal( - signal_spectrum, noise_spectrum_estimator_.GetNoiseSpectrum(), - data_dumper_); - - // Update the noise spectrum based on the signal spectrum. - noise_spectrum_estimator_.Update(signal_spectrum, - initialization_frames_left_ > 0); - - // Update the number of frames until a reliable signal spectrum is achieved. - initialization_frames_left_ = std::max(0, initialization_frames_left_ - 1); - - if (last_signal_type_ == signal_type) { - consistent_classification_counter_ = - std::max(0, consistent_classification_counter_ - 1); - } else { - last_signal_type_ = signal_type; - consistent_classification_counter_ = 3; - } - - if (consistent_classification_counter_ > 0) { - return SignalClassifier::SignalType::kNonStationary; - } - return signal_type; -} - -} // namespace webrtc diff --git a/modules/audio_processing/agc2/signal_classifier.h b/modules/audio_processing/agc2/signal_classifier.h deleted file mode 100644 index 20cce920f0..0000000000 --- a/modules/audio_processing/agc2/signal_classifier.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. 
All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef MODULES_AUDIO_PROCESSING_AGC2_SIGNAL_CLASSIFIER_H_ -#define MODULES_AUDIO_PROCESSING_AGC2_SIGNAL_CLASSIFIER_H_ - -#include -#include - -#include "api/array_view.h" -#include "common_audio/third_party/ooura/fft_size_128/ooura_fft.h" -#include "modules/audio_processing/agc2/down_sampler.h" -#include "modules/audio_processing/agc2/noise_spectrum_estimator.h" - -namespace webrtc { - -class ApmDataDumper; -class AudioBuffer; - -class SignalClassifier { - public: - enum class SignalType { kNonStationary, kStationary }; - - explicit SignalClassifier(ApmDataDumper* data_dumper); - - SignalClassifier() = delete; - SignalClassifier(const SignalClassifier&) = delete; - SignalClassifier& operator=(const SignalClassifier&) = delete; - - ~SignalClassifier(); - - void Initialize(int sample_rate_hz); - SignalType Analyze(rtc::ArrayView signal); - - private: - class FrameExtender { - public: - FrameExtender(size_t frame_size, size_t extended_frame_size); - - FrameExtender() = delete; - FrameExtender(const FrameExtender&) = delete; - FrameExtender& operator=(const FrameExtender&) = delete; - - ~FrameExtender(); - - void ExtendFrame(rtc::ArrayView x, - rtc::ArrayView x_extended); - - private: - std::vector x_old_; - }; - - ApmDataDumper* const data_dumper_; - DownSampler down_sampler_; - std::unique_ptr frame_extender_; - NoiseSpectrumEstimator noise_spectrum_estimator_; - int sample_rate_hz_; - int initialization_frames_left_; - int consistent_classification_counter_; - SignalType last_signal_type_; - const OouraFft ooura_fft_; -}; - -} // namespace webrtc - -#endif // MODULES_AUDIO_PROCESSING_AGC2_SIGNAL_CLASSIFIER_H_ diff --git a/modules/audio_processing/agc2/signal_classifier_unittest.cc b/modules/audio_processing/agc2/signal_classifier_unittest.cc deleted file mode 100644 index f1a3a664f0..0000000000 --- a/modules/audio_processing/agc2/signal_classifier_unittest.cc +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "modules/audio_processing/agc2/signal_classifier.h" - -#include -#include -#include - -#include "api/function_view.h" -#include "modules/audio_processing/agc2/agc2_testing_common.h" -#include "modules/audio_processing/logging/apm_data_dumper.h" -#include "rtc_base/gunit.h" -#include "rtc_base/random.h" - -namespace webrtc { -namespace { -constexpr int kNumIterations = 100; - -// Runs the signal classifier on audio generated by 'sample_generator' -// for kNumIterations. Returns the number of frames classified as noise. 
-float RunClassifier(rtc::FunctionView sample_generator, - int sample_rate_hz) { - ApmDataDumper data_dumper(0); - SignalClassifier classifier(&data_dumper); - std::array signal; - classifier.Initialize(sample_rate_hz); - const size_t samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100); - int number_of_noise_frames = 0; - for (int i = 0; i < kNumIterations; ++i) { - for (size_t j = 0; j < samples_per_channel; ++j) { - signal[j] = sample_generator(); - } - number_of_noise_frames += - classifier.Analyze({&signal[0], samples_per_channel}) == - SignalClassifier::SignalType::kStationary; - } - return number_of_noise_frames; -} - -class SignalClassifierParametrization : public ::testing::TestWithParam { - protected: - int sample_rate_hz() const { return GetParam(); } -}; - -// White random noise is stationary, but does not trigger the detector -// every frame due to the randomness. -TEST_P(SignalClassifierParametrization, WhiteNoise) { - test::WhiteNoiseGenerator gen(/*min_amplitude=*/test::kMinS16, - /*max_amplitude=*/test::kMaxS16); - const int number_of_noise_frames = RunClassifier(gen, sample_rate_hz()); - EXPECT_GT(number_of_noise_frames, kNumIterations / 2); -} - -// Sine curves are (very) stationary. They trigger the detector all -// the time. Except for a few initial frames. -TEST_P(SignalClassifierParametrization, SineTone) { - test::SineGenerator gen(/*amplitude=*/test::kMaxS16, /*frequency_hz=*/600.0f, - sample_rate_hz()); - const int number_of_noise_frames = RunClassifier(gen, sample_rate_hz()); - EXPECT_GE(number_of_noise_frames, kNumIterations - 5); -} - -// Pulses are transient if they are far enough apart. They shouldn't -// trigger the noise detector. -TEST_P(SignalClassifierParametrization, PulseTone) { - test::PulseGenerator gen(/*pulse_amplitude=*/test::kMaxS16, - /*no_pulse_amplitude=*/10.0f, /*frequency_hz=*/20.0f, - sample_rate_hz()); - const int number_of_noise_frames = RunClassifier(gen, sample_rate_hz()); - EXPECT_EQ(number_of_noise_frames, 0); -} - -INSTANTIATE_TEST_SUITE_P(GainController2SignalClassifier, - SignalClassifierParametrization, - ::testing::Values(8000, 16000, 32000, 48000)); - -} // namespace -} // namespace webrtc diff --git a/modules/audio_processing/agc2/vad_with_level.cc b/modules/audio_processing/agc2/vad_with_level.cc deleted file mode 100644 index 9747ca2370..0000000000 --- a/modules/audio_processing/agc2/vad_with_level.cc +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "modules/audio_processing/agc2/vad_with_level.h" - -#include -#include -#include - -#include "api/array_view.h" -#include "common_audio/include/audio_util.h" -#include "common_audio/resampler/include/push_resampler.h" -#include "modules/audio_processing/agc2/agc2_common.h" -#include "modules/audio_processing/agc2/rnn_vad/common.h" -#include "modules/audio_processing/agc2/rnn_vad/features_extraction.h" -#include "modules/audio_processing/agc2/rnn_vad/rnn.h" -#include "rtc_base/checks.h" - -namespace webrtc { -namespace { - -using VoiceActivityDetector = VadLevelAnalyzer::VoiceActivityDetector; - -// Default VAD that combines a resampler and the RNN VAD. 
-// Computes the speech probability on the first channel. -class Vad : public VoiceActivityDetector { - public: - explicit Vad(const AvailableCpuFeatures& cpu_features) - : features_extractor_(cpu_features), rnn_vad_(cpu_features) {} - Vad(const Vad&) = delete; - Vad& operator=(const Vad&) = delete; - ~Vad() = default; - - void Reset() override { rnn_vad_.Reset(); } - - float ComputeProbability(AudioFrameView frame) override { - // The source number of channels is 1, because we always use the 1st - // channel. - resampler_.InitializeIfNeeded( - /*sample_rate_hz=*/static_cast(frame.samples_per_channel() * 100), - rnn_vad::kSampleRate24kHz, - /*num_channels=*/1); - - std::array work_frame; - // Feed the 1st channel to the resampler. - resampler_.Resample(frame.channel(0).data(), frame.samples_per_channel(), - work_frame.data(), rnn_vad::kFrameSize10ms24kHz); - - std::array feature_vector; - const bool is_silence = features_extractor_.CheckSilenceComputeFeatures( - work_frame, feature_vector); - return rnn_vad_.ComputeVadProbability(feature_vector, is_silence); - } - - private: - PushResampler resampler_; - rnn_vad::FeaturesExtractor features_extractor_; - rnn_vad::RnnVad rnn_vad_; -}; - -} // namespace - -VadLevelAnalyzer::VadLevelAnalyzer(int vad_reset_period_ms, - const AvailableCpuFeatures& cpu_features) - : VadLevelAnalyzer(vad_reset_period_ms, - std::make_unique(cpu_features)) {} - -VadLevelAnalyzer::VadLevelAnalyzer(int vad_reset_period_ms, - std::unique_ptr vad) - : vad_(std::move(vad)), - vad_reset_period_frames_( - rtc::CheckedDivExact(vad_reset_period_ms, kFrameDurationMs)), - time_to_vad_reset_(vad_reset_period_frames_) { - RTC_DCHECK(vad_); - RTC_DCHECK_GT(vad_reset_period_frames_, 1); -} - -VadLevelAnalyzer::~VadLevelAnalyzer() = default; - -VadLevelAnalyzer::Result VadLevelAnalyzer::AnalyzeFrame( - AudioFrameView frame) { - // Periodically reset the VAD. - time_to_vad_reset_--; - if (time_to_vad_reset_ <= 0) { - vad_->Reset(); - time_to_vad_reset_ = vad_reset_period_frames_; - } - // Compute levels. - float peak = 0.0f; - float rms = 0.0f; - for (const auto& x : frame.channel(0)) { - peak = std::max(std::fabs(x), peak); - rms += x * x; - } - return {vad_->ComputeProbability(frame), - FloatS16ToDbfs(std::sqrt(rms / frame.samples_per_channel())), - FloatS16ToDbfs(peak)}; -} - -} // namespace webrtc diff --git a/modules/audio_processing/agc2/vad_with_level.h b/modules/audio_processing/agc2/vad_with_level.h deleted file mode 100644 index 8d2ae45762..0000000000 --- a/modules/audio_processing/agc2/vad_with_level.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef MODULES_AUDIO_PROCESSING_AGC2_VAD_WITH_LEVEL_H_ -#define MODULES_AUDIO_PROCESSING_AGC2_VAD_WITH_LEVEL_H_ - -#include - -#include "modules/audio_processing/agc2/cpu_features.h" -#include "modules/audio_processing/include/audio_frame_view.h" - -namespace webrtc { - -// Class to analyze voice activity and audio levels. -class VadLevelAnalyzer { - public: - struct Result { - float speech_probability; // Range: [0, 1]. - float rms_dbfs; // Root mean square power (dBFS). - float peak_dbfs; // Peak power (dBFS). 
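Since the `Result` struct above disappears with this file, callers that still need the levels can compute them the way the deleted `AnalyzeFrame()` did. A sketch extracted from that code (`FloatS16ToDbfs` comes from common_audio/include/audio_util.h):

#include <algorithm>
#include <cmath>

#include "api/array_view.h"
#include "common_audio/include/audio_util.h"

// Sketch: peak and RMS levels (dBFS) of one channel, mirroring the loop in
// the deleted VadLevelAnalyzer::AnalyzeFrame() above.
void ComputeLevelsDbfs(rtc::ArrayView<const float> channel,
                       float& rms_dbfs,
                       float& peak_dbfs) {
  float peak = 0.0f;
  float sum_squares = 0.0f;
  for (float sample : channel) {
    peak = std::max(std::fabs(sample), peak);
    sum_squares += sample * sample;
  }
  rms_dbfs = webrtc::FloatS16ToDbfs(std::sqrt(sum_squares / channel.size()));
  peak_dbfs = webrtc::FloatS16ToDbfs(peak);
}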
- }; - - // Voice Activity Detector (VAD) interface. - class VoiceActivityDetector { - public: - virtual ~VoiceActivityDetector() = default; - // Resets the internal state. - virtual void Reset() = 0; - // Analyzes an audio frame and returns the speech probability. - virtual float ComputeProbability(AudioFrameView frame) = 0; - }; - - // Ctor. `vad_reset_period_ms` indicates the period in milliseconds to call - // `VadLevelAnalyzer::Reset()`; it must be equal to or greater than the - // duration of two frames. Uses `cpu_features` to instantiate the default VAD. - VadLevelAnalyzer(int vad_reset_period_ms, - const AvailableCpuFeatures& cpu_features); - // Ctor. Uses a custom `vad`. - VadLevelAnalyzer(int vad_reset_period_ms, - std::unique_ptr vad); - - VadLevelAnalyzer(const VadLevelAnalyzer&) = delete; - VadLevelAnalyzer& operator=(const VadLevelAnalyzer&) = delete; - ~VadLevelAnalyzer(); - - // Computes the speech probability and the level for `frame`. - Result AnalyzeFrame(AudioFrameView frame); - - private: - std::unique_ptr vad_; - const int vad_reset_period_frames_; - int time_to_vad_reset_; -}; - -} // namespace webrtc - -#endif // MODULES_AUDIO_PROCESSING_AGC2_VAD_WITH_LEVEL_H_ diff --git a/modules/audio_processing/agc2/vad_with_level_unittest.cc b/modules/audio_processing/agc2/vad_with_level_unittest.cc deleted file mode 100644 index ec8e476965..0000000000 --- a/modules/audio_processing/agc2/vad_with_level_unittest.cc +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "modules/audio_processing/agc2/vad_with_level.h" - -#include -#include -#include - -#include "modules/audio_processing/agc2/agc2_common.h" -#include "modules/audio_processing/include/audio_frame_view.h" -#include "rtc_base/gunit.h" -#include "rtc_base/numerics/safe_compare.h" -#include "test/gmock.h" - -namespace webrtc { -namespace { - -using ::testing::AnyNumber; -using ::testing::ReturnRoundRobin; - -constexpr int kNoVadPeriodicReset = - kFrameDurationMs * (std::numeric_limits::max() / kFrameDurationMs); - -constexpr int kSampleRateHz = 8000; - -class MockVad : public VadLevelAnalyzer::VoiceActivityDetector { - public: - MOCK_METHOD(void, Reset, (), (override)); - MOCK_METHOD(float, - ComputeProbability, - (AudioFrameView frame), - (override)); -}; - -// Creates a `VadLevelAnalyzer` injecting a mock VAD which repeatedly returns -// the next value from `speech_probabilities` until it reaches the end and will -// restart from the beginning. -std::unique_ptr CreateVadLevelAnalyzerWithMockVad( - int vad_reset_period_ms, - const std::vector& speech_probabilities, - int expected_vad_reset_calls = 0) { - auto vad = std::make_unique(); - EXPECT_CALL(*vad, ComputeProbability) - .Times(AnyNumber()) - .WillRepeatedly(ReturnRoundRobin(speech_probabilities)); - if (expected_vad_reset_calls >= 0) { - EXPECT_CALL(*vad, Reset).Times(expected_vad_reset_calls); - } - return std::make_unique(vad_reset_period_ms, - std::move(vad)); -} - -// 10 ms mono frame. -struct FrameWithView { - // Ctor. Initializes the frame samples with `value`. 
- FrameWithView(float value = 0.0f) - : channel0(samples.data()), - view(&channel0, /*num_channels=*/1, samples.size()) { - samples.fill(value); - } - std::array samples; - const float* const channel0; - const AudioFrameView view; -}; - -TEST(GainController2VadLevelAnalyzer, RmsLessThanPeakLevel) { - auto analyzer = CreateVadLevelAnalyzerWithMockVad( - /*vad_reset_period_ms=*/1500, - /*speech_probabilities=*/{1.0f}, - /*expected_vad_reset_calls=*/0); - // Handcrafted frame so that the average is lower than the peak value. - FrameWithView frame(1000.0f); // Constant frame. - frame.samples[10] = 2000.0f; // Except for one peak value. - // Compute audio frame levels. - auto levels_and_vad_prob = analyzer->AnalyzeFrame(frame.view); - EXPECT_LT(levels_and_vad_prob.rms_dbfs, levels_and_vad_prob.peak_dbfs); -} - -// Checks that the expect VAD probabilities are returned. -TEST(GainController2VadLevelAnalyzer, NoSpeechProbabilitySmoothing) { - const std::vector speech_probabilities{0.709f, 0.484f, 0.882f, 0.167f, - 0.44f, 0.525f, 0.858f, 0.314f, - 0.653f, 0.965f, 0.413f, 0.0f}; - auto analyzer = CreateVadLevelAnalyzerWithMockVad(kNoVadPeriodicReset, - speech_probabilities); - FrameWithView frame; - for (int i = 0; rtc::SafeLt(i, speech_probabilities.size()); ++i) { - SCOPED_TRACE(i); - EXPECT_EQ(speech_probabilities[i], - analyzer->AnalyzeFrame(frame.view).speech_probability); - } -} - -// Checks that the VAD is not periodically reset. -TEST(GainController2VadLevelAnalyzer, VadNoPeriodicReset) { - constexpr int kNumFrames = 19; - auto analyzer = CreateVadLevelAnalyzerWithMockVad( - kNoVadPeriodicReset, /*speech_probabilities=*/{1.0f}, - /*expected_vad_reset_calls=*/0); - FrameWithView frame; - for (int i = 0; i < kNumFrames; ++i) { - analyzer->AnalyzeFrame(frame.view); - } -} - -class VadPeriodResetParametrization - : public ::testing::TestWithParam> { - protected: - int num_frames() const { return std::get<0>(GetParam()); } - int vad_reset_period_frames() const { return std::get<1>(GetParam()); } -}; - -// Checks that the VAD is periodically reset with the expected period. -TEST_P(VadPeriodResetParametrization, VadPeriodicReset) { - auto analyzer = CreateVadLevelAnalyzerWithMockVad( - /*vad_reset_period_ms=*/vad_reset_period_frames() * kFrameDurationMs, - /*speech_probabilities=*/{1.0f}, - /*expected_vad_reset_calls=*/num_frames() / vad_reset_period_frames()); - FrameWithView frame; - for (int i = 0; i < num_frames(); ++i) { - analyzer->AnalyzeFrame(frame.view); - } -} - -INSTANTIATE_TEST_SUITE_P(GainController2VadLevelAnalyzer, - VadPeriodResetParametrization, - ::testing::Combine(::testing::Values(1, 19, 123), - ::testing::Values(2, 5, 20, 53))); - -} // namespace -} // namespace webrtc diff --git a/modules/audio_processing/agc2/vad_wrapper.cc b/modules/audio_processing/agc2/vad_wrapper.cc new file mode 100644 index 0000000000..91448f8d86 --- /dev/null +++ b/modules/audio_processing/agc2/vad_wrapper.cc @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/audio_processing/agc2/vad_wrapper.h" + +#include +#include + +#include "api/array_view.h" +#include "common_audio/resampler/include/push_resampler.h" +#include "modules/audio_processing/agc2/agc2_common.h" +#include "modules/audio_processing/agc2/rnn_vad/common.h" +#include "modules/audio_processing/agc2/rnn_vad/features_extraction.h" +#include "modules/audio_processing/agc2/rnn_vad/rnn.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace { + +constexpr int kNumFramesPerSecond = 100; + +class MonoVadImpl : public VoiceActivityDetectorWrapper::MonoVad { + public: + explicit MonoVadImpl(const AvailableCpuFeatures& cpu_features) + : features_extractor_(cpu_features), rnn_vad_(cpu_features) {} + MonoVadImpl(const MonoVadImpl&) = delete; + MonoVadImpl& operator=(const MonoVadImpl&) = delete; + ~MonoVadImpl() = default; + + int SampleRateHz() const override { return rnn_vad::kSampleRate24kHz; } + void Reset() override { rnn_vad_.Reset(); } + float Analyze(rtc::ArrayView frame) override { + RTC_DCHECK_EQ(frame.size(), rnn_vad::kFrameSize10ms24kHz); + std::array feature_vector; + const bool is_silence = features_extractor_.CheckSilenceComputeFeatures( + /*samples=*/{frame.data(), rnn_vad::kFrameSize10ms24kHz}, + feature_vector); + return rnn_vad_.ComputeVadProbability(feature_vector, is_silence); + } + + private: + rnn_vad::FeaturesExtractor features_extractor_; + rnn_vad::RnnVad rnn_vad_; +}; + +} // namespace + +VoiceActivityDetectorWrapper::VoiceActivityDetectorWrapper( + int vad_reset_period_ms, + const AvailableCpuFeatures& cpu_features, + int sample_rate_hz) + : VoiceActivityDetectorWrapper(vad_reset_period_ms, + std::make_unique(cpu_features), + sample_rate_hz) {} + +VoiceActivityDetectorWrapper::VoiceActivityDetectorWrapper( + int vad_reset_period_ms, + std::unique_ptr vad, + int sample_rate_hz) + : vad_reset_period_frames_( + rtc::CheckedDivExact(vad_reset_period_ms, kFrameDurationMs)), + time_to_vad_reset_(vad_reset_period_frames_), + vad_(std::move(vad)) { + RTC_DCHECK(vad_); + RTC_DCHECK_GT(vad_reset_period_frames_, 1); + resampled_buffer_.resize( + rtc::CheckedDivExact(vad_->SampleRateHz(), kNumFramesPerSecond)); + Initialize(sample_rate_hz); +} + +VoiceActivityDetectorWrapper::~VoiceActivityDetectorWrapper() = default; + +void VoiceActivityDetectorWrapper::Initialize(int sample_rate_hz) { + RTC_DCHECK_GT(sample_rate_hz, 0); + frame_size_ = rtc::CheckedDivExact(sample_rate_hz, kNumFramesPerSecond); + int status = + resampler_.InitializeIfNeeded(sample_rate_hz, vad_->SampleRateHz(), + /*num_channels=*/1); + constexpr int kStatusOk = 0; + RTC_DCHECK_EQ(status, kStatusOk); + vad_->Reset(); +} + +float VoiceActivityDetectorWrapper::Analyze(AudioFrameView frame) { + // Periodically reset the VAD. + time_to_vad_reset_--; + if (time_to_vad_reset_ <= 0) { + vad_->Reset(); + time_to_vad_reset_ = vad_reset_period_frames_; + } + // Resample the first channel of `frame`. + RTC_DCHECK_EQ(frame.samples_per_channel(), frame_size_); + resampler_.Resample(frame.channel(0).data(), frame_size_, + resampled_buffer_.data(), resampled_buffer_.size()); + + return vad_->Analyze(resampled_buffer_); +} + +} // namespace webrtc diff --git a/modules/audio_processing/agc2/vad_wrapper.h b/modules/audio_processing/agc2/vad_wrapper.h new file mode 100644 index 0000000000..6df0ead271 --- /dev/null +++ b/modules/audio_processing/agc2/vad_wrapper.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. 
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_VAD_WRAPPER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_VAD_WRAPPER_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "modules/audio_processing/agc2/cpu_features.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+
+// Wraps a single-channel Voice Activity Detector (VAD) which is used to analyze
+// the first channel of the input audio frames. Takes care of resampling the
+// input frames to match the sample rate of the wrapped VAD and periodically
+// resets the VAD.
+class VoiceActivityDetectorWrapper {
+ public:
+  // Single channel VAD interface.
+  class MonoVad {
+   public:
+    virtual ~MonoVad() = default;
+    // Returns the sample rate (Hz) required for the input frames analyzed by
+    // `Analyze()`.
+    virtual int SampleRateHz() const = 0;
+    // Resets the internal state.
+    virtual void Reset() = 0;
+    // Analyzes an audio frame and returns the speech probability.
+    virtual float Analyze(rtc::ArrayView<float> frame) = 0;
+  };
+
+  // Ctor. `vad_reset_period_ms` indicates the period in milliseconds to call
+  // `MonoVad::Reset()`; it must be equal to or greater than the duration of two
+  // frames. Uses `cpu_features` to instantiate the default VAD.
+  VoiceActivityDetectorWrapper(int vad_reset_period_ms,
+                               const AvailableCpuFeatures& cpu_features,
+                               int sample_rate_hz);
+  // Ctor. Uses a custom `vad`.
+  VoiceActivityDetectorWrapper(int vad_reset_period_ms,
+                               std::unique_ptr<MonoVad> vad,
+                               int sample_rate_hz);
+
+  VoiceActivityDetectorWrapper(const VoiceActivityDetectorWrapper&) = delete;
+  VoiceActivityDetectorWrapper& operator=(const VoiceActivityDetectorWrapper&) =
+      delete;
+  ~VoiceActivityDetectorWrapper();
+
+  // Initializes the VAD wrapper.
+  void Initialize(int sample_rate_hz);
+
+  // Analyzes the first channel of `frame` and returns the speech probability.
+  // `frame` must be a 10 ms frame with the sample rate specified in the last
+  // `Initialize()` call.
+  float Analyze(AudioFrameView<const float> frame);
+
+ private:
+  const int vad_reset_period_frames_;
+  int frame_size_;
+  int time_to_vad_reset_;
+  PushResampler<float> resampler_;
+  std::unique_ptr<MonoVad> vad_;
+  std::vector<float> resampled_buffer_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_AGC2_VAD_WRAPPER_H_
diff --git a/modules/audio_processing/agc2/vad_wrapper_unittest.cc b/modules/audio_processing/agc2/vad_wrapper_unittest.cc
new file mode 100644
index 0000000000..91efdb566e
--- /dev/null
+++ b/modules/audio_processing/agc2/vad_wrapper_unittest.cc
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
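With the full `VoiceActivityDetectorWrapper` interface now visible, here is a sketch of plugging in a custom `MonoVad` through the second constructor. The energy threshold, the internal rate, and the probability mapping are illustrative values, not anything the patch itself defines:

#include <memory>

#include "modules/audio_processing/agc2/vad_wrapper.h"

// Sketch: a trivial energy-gate MonoVad.
class EnergyVad : public webrtc::VoiceActivityDetectorWrapper::MonoVad {
 public:
  int SampleRateHz() const override { return 16000; }
  void Reset() override {}
  float Analyze(rtc::ArrayView<float> frame) override {
    float energy = 0.0f;
    for (float sample : frame) {
      energy += sample * sample;
    }
    // Crude mapping from mean frame energy to a pseudo speech probability.
    return energy / frame.size() > 1e-4f ? 1.0f : 0.0f;
  }
};

std::unique_ptr<webrtc::VoiceActivityDetectorWrapper> MakeEnergyVadWrapper() {
  // The wrapper resamples 48 kHz input frames to the VAD's 16 kHz rate.
  return std::make_unique<webrtc::VoiceActivityDetectorWrapper>(
      /*vad_reset_period_ms=*/1500, std::make_unique<EnergyVad>(),
      /*sample_rate_hz=*/48000);
}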
+ */
+
+#include "modules/audio_processing/agc2/vad_wrapper.h"
+
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/numerics/safe_compare.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace {
+
+using ::testing::AnyNumber;
+using ::testing::Return;
+using ::testing::ReturnRoundRobin;
+using ::testing::Truly;
+
+constexpr int kNumFramesPerSecond = 100;
+
+constexpr int kNoVadPeriodicReset =
+    kFrameDurationMs * (std::numeric_limits<int>::max() / kFrameDurationMs);
+
+constexpr int kSampleRate8kHz = 8000;
+
+class MockVad : public VoiceActivityDetectorWrapper::MonoVad {
+ public:
+  MOCK_METHOD(int, SampleRateHz, (), (const, override));
+  MOCK_METHOD(void, Reset, (), (override));
+  MOCK_METHOD(float, Analyze, (rtc::ArrayView<float> frame), (override));
+};
+
+// Checks that the ctor and `Initialize()` read the sample rate of the wrapped
+// VAD.
+TEST(GainController2VoiceActivityDetectorWrapper, CtorAndInitReadSampleRate) {
+  auto vad = std::make_unique<MockVad>();
+  EXPECT_CALL(*vad, SampleRateHz)
+      .Times(2)
+      .WillRepeatedly(Return(kSampleRate8kHz));
+  EXPECT_CALL(*vad, Reset).Times(AnyNumber());
+  auto vad_wrapper = std::make_unique<VoiceActivityDetectorWrapper>(
+      kNoVadPeriodicReset, std::move(vad), kSampleRate8kHz);
+}
+
+// Creates a `VoiceActivityDetectorWrapper` injecting a mock VAD that
+// repeatedly returns the next value from `speech_probabilities` and that
+// restarts from the beginning after the last element is returned.
+std::unique_ptr<VoiceActivityDetectorWrapper> CreateMockVadWrapper(
+    int vad_reset_period_ms,
+    int sample_rate_hz,
+    const std::vector<float>& speech_probabilities,
+    int expected_vad_reset_calls) {
+  auto vad = std::make_unique<MockVad>();
+  EXPECT_CALL(*vad, SampleRateHz)
+      .Times(AnyNumber())
+      .WillRepeatedly(Return(sample_rate_hz));
+  if (expected_vad_reset_calls >= 0) {
+    EXPECT_CALL(*vad, Reset).Times(expected_vad_reset_calls);
+  }
+  EXPECT_CALL(*vad, Analyze)
+      .Times(AnyNumber())
+      .WillRepeatedly(ReturnRoundRobin(speech_probabilities));
+  return std::make_unique<VoiceActivityDetectorWrapper>(
+      vad_reset_period_ms, std::move(vad), kSampleRate8kHz);
+}
+
+// 10 ms mono frame.
+struct FrameWithView {
+  // Ctor. Creates a zero-initialized 10 ms frame for the given sample rate.
+  explicit FrameWithView(int sample_rate_hz)
+      : samples(rtc::CheckedDivExact(sample_rate_hz, kNumFramesPerSecond),
+                0.0f),
+        channel0(samples.data()),
+        view(&channel0, /*num_channels=*/1, samples.size()) {}
+  std::vector<float> samples;
+  const float* const channel0;
+  const AudioFrameView<const float> view;
+};
+
+// Checks that the expected speech probabilities are returned.
+TEST(GainController2VoiceActivityDetectorWrapper, CheckSpeechProbabilities) {
+  const std::vector<float> speech_probabilities{0.709f, 0.484f, 0.882f, 0.167f,
+                                                0.44f,  0.525f, 0.858f, 0.314f,
+                                                0.653f, 0.965f, 0.413f, 0.0f};
+  auto vad_wrapper = CreateMockVadWrapper(kNoVadPeriodicReset, kSampleRate8kHz,
+                                          speech_probabilities,
+                                          /*expected_vad_reset_calls=*/1);
+  FrameWithView frame(kSampleRate8kHz);
+  for (int i = 0; rtc::SafeLt(i, speech_probabilities.size()); ++i) {
+    SCOPED_TRACE(i);
+    EXPECT_EQ(speech_probabilities[i], vad_wrapper->Analyze(frame.view));
+  }
+}
+
+// Checks that the VAD is not periodically reset.
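One detail worth calling out before the remaining tests: the wrapper constructor calls `Initialize()`, which issues one `MonoVad::Reset()` of its own, so the expectations here count `1 + num_frames / vad_reset_period_frames` resets. A compact statement of that arithmetic:

// Sketch: expected MonoVad::Reset() call count after analyzing `num_frames`
// frames. The leading 1 is the Reset() issued by Initialize() from the
// wrapper constructor.
int ExpectedResetCalls(int num_frames, int vad_reset_period_frames) {
  return 1 + num_frames / vad_reset_period_frames;
}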
+TEST(GainController2VoiceActivityDetectorWrapper, VadNoPeriodicReset) { + constexpr int kNumFrames = 19; + auto vad_wrapper = CreateMockVadWrapper(kNoVadPeriodicReset, kSampleRate8kHz, + /*speech_probabilities=*/{1.0f}, + /*expected_vad_reset_calls=*/1); + FrameWithView frame(kSampleRate8kHz); + for (int i = 0; i < kNumFrames; ++i) { + vad_wrapper->Analyze(frame.view); + } +} + +class VadPeriodResetParametrization + : public ::testing::TestWithParam> { + protected: + int num_frames() const { return std::get<0>(GetParam()); } + int vad_reset_period_frames() const { return std::get<1>(GetParam()); } +}; + +// Checks that the VAD is periodically reset with the expected period. +TEST_P(VadPeriodResetParametrization, VadPeriodicReset) { + auto vad_wrapper = CreateMockVadWrapper( + /*vad_reset_period_ms=*/vad_reset_period_frames() * kFrameDurationMs, + kSampleRate8kHz, + /*speech_probabilities=*/{1.0f}, + /*expected_vad_reset_calls=*/1 + + num_frames() / vad_reset_period_frames()); + FrameWithView frame(kSampleRate8kHz); + for (int i = 0; i < num_frames(); ++i) { + vad_wrapper->Analyze(frame.view); + } +} + +INSTANTIATE_TEST_SUITE_P(GainController2VoiceActivityDetectorWrapper, + VadPeriodResetParametrization, + ::testing::Combine(::testing::Values(1, 19, 123), + ::testing::Values(2, 5, 20, 53))); + +class VadResamplingParametrization + : public ::testing::TestWithParam> { + protected: + int input_sample_rate_hz() const { return std::get<0>(GetParam()); } + int vad_sample_rate_hz() const { return std::get<1>(GetParam()); } +}; + +// Checks that regardless of the input audio sample rate, the wrapped VAD +// analyzes frames having the expected size, that is according to its internal +// sample rate. +TEST_P(VadResamplingParametrization, CheckResampledFrameSize) { + auto vad = std::make_unique(); + EXPECT_CALL(*vad, SampleRateHz) + .Times(AnyNumber()) + .WillRepeatedly(Return(vad_sample_rate_hz())); + EXPECT_CALL(*vad, Reset).Times(1); + EXPECT_CALL(*vad, Analyze(Truly([this](rtc::ArrayView frame) { + return rtc::SafeEq(frame.size(), rtc::CheckedDivExact(vad_sample_rate_hz(), + kNumFramesPerSecond)); + }))).Times(1); + auto vad_wrapper = std::make_unique( + kNoVadPeriodicReset, std::move(vad), input_sample_rate_hz()); + FrameWithView frame(input_sample_rate_hz()); + vad_wrapper->Analyze(frame.view); +} + +INSTANTIATE_TEST_SUITE_P( + GainController2VoiceActivityDetectorWrapper, + VadResamplingParametrization, + ::testing::Combine(::testing::Values(8000, 16000, 44100, 48000), + ::testing::Values(6000, 8000, 12000, 16000, 24000))); + +} // namespace +} // namespace webrtc diff --git a/modules/audio_processing/audio_buffer.h b/modules/audio_processing/audio_buffer.h index 3eecf0dbaa..ab0af4493c 100644 --- a/modules/audio_processing/audio_buffer.h +++ b/modules/audio_processing/audio_buffer.h @@ -71,8 +71,8 @@ class AudioBuffer { // Usage: // channels()[channel][sample]. // Where: - // 0 <= channel < |buffer_num_channels_| - // 0 <= sample < |buffer_num_frames_| + // 0 <= channel < `buffer_num_channels_` + // 0 <= sample < `buffer_num_frames_` float* const* channels() { return data_->channels(); } const float* const* channels_const() const { return data_->channels(); } @@ -80,9 +80,9 @@ class AudioBuffer { // Usage: // split_bands(channel)[band][sample]. 
// Where: - // 0 <= channel < |buffer_num_channels_| - // 0 <= band < |num_bands_| - // 0 <= sample < |num_split_frames_| + // 0 <= channel < `buffer_num_channels_` + // 0 <= band < `num_bands_` + // 0 <= sample < `num_split_frames_` const float* const* split_bands_const(size_t channel) const { return split_data_.get() ? split_data_->bands(channel) : data_->bands(channel); @@ -96,9 +96,9 @@ class AudioBuffer { // Usage: // split_channels(band)[channel][sample]. // Where: - // 0 <= band < |num_bands_| - // 0 <= channel < |buffer_num_channels_| - // 0 <= sample < |num_split_frames_| + // 0 <= band < `num_bands_` + // 0 <= channel < `buffer_num_channels_` + // 0 <= sample < `num_split_frames_` const float* const* split_channels_const(Band band) const { if (split_data_.get()) { return split_data_->channels(band); diff --git a/modules/audio_processing/audio_frame_view_unittest.cc b/modules/audio_processing/audio_frame_view_unittest.cc index a4ad4ccd8c..fd25bc3b0b 100644 --- a/modules/audio_processing/audio_frame_view_unittest.cc +++ b/modules/audio_processing/audio_frame_view_unittest.cc @@ -19,7 +19,7 @@ TEST(AudioFrameTest, ConstructFromAudioBuffer) { constexpr int kNumChannels = 2; constexpr float kFloatConstant = 1272.f; constexpr float kIntConstant = 17252; - const webrtc::StreamConfig stream_config(kSampleRateHz, kNumChannels, false); + const webrtc::StreamConfig stream_config(kSampleRateHz, kNumChannels); webrtc::AudioBuffer buffer( stream_config.sample_rate_hz(), stream_config.num_channels(), stream_config.sample_rate_hz(), stream_config.num_channels(), diff --git a/modules/audio_processing/audio_processing_builder_impl.cc b/modules/audio_processing/audio_processing_builder_impl.cc index f55c9158fb..79c6fbf524 100644 --- a/modules/audio_processing/audio_processing_builder_impl.cc +++ b/modules/audio_processing/audio_processing_builder_impl.cc @@ -20,23 +20,13 @@ namespace webrtc { AudioProcessingBuilder::AudioProcessingBuilder() = default; AudioProcessingBuilder::~AudioProcessingBuilder() = default; -AudioProcessing* AudioProcessingBuilder::Create() { - webrtc::Config config; - return Create(config); -} - -AudioProcessing* AudioProcessingBuilder::Create(const webrtc::Config& config) { +rtc::scoped_refptr<AudioProcessing> AudioProcessingBuilder::Create() { #ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE - - // Implementation returning a null pointer for using when the APM is excluded - // from the build.. + // Return a null pointer when the APM is excluded from the build. return nullptr; - -#else - - // Standard implementation.
- return new rtc::RefCountedObject<AudioProcessingImpl>( - config, std::move(capture_post_processing_), +#else // WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE + return rtc::make_ref_counted<AudioProcessingImpl>( + config_, std::move(capture_post_processing_), std::move(render_pre_processing_), std::move(echo_control_factory_), std::move(echo_detector_), std::move(capture_analyzer_)); #endif diff --git a/modules/audio_processing/audio_processing_impl.cc b/modules/audio_processing/audio_processing_impl.cc index 4a1985545f..9a1aaee821 100644 --- a/modules/audio_processing/audio_processing_impl.cc +++ b/modules/audio_processing/audio_processing_impl.cc @@ -24,17 +24,16 @@ #include "common_audio/include/audio_util.h" #include "modules/audio_processing/aec_dump/aec_dump_factory.h" #include "modules/audio_processing/audio_buffer.h" -#include "modules/audio_processing/common.h" #include "modules/audio_processing/include/audio_frame_view.h" #include "modules/audio_processing/logging/apm_data_dumper.h" #include "modules/audio_processing/optionally_built_submodule_creators.h" #include "rtc_base/atomic_ops.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/logging.h" #include "rtc_base/ref_counted_object.h" #include "rtc_base/time_utils.h" #include "rtc_base/trace_event.h" +#include "system_wrappers/include/denormal_disabler.h" #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" @@ -50,20 +49,6 @@ namespace webrtc { namespace { -static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { - switch (layout) { - case AudioProcessing::kMono: - case AudioProcessing::kStereo: - return false; - case AudioProcessing::kMonoAndKeyboard: - case AudioProcessing::kStereoAndKeyboard: - return true; - } - - RTC_NOTREACHED(); - return false; -} - bool SampleRateSupportsMultiBand(int sample_rate_hz) { return sample_rate_hz == AudioProcessing::kSampleRate32kHz || sample_rate_hz == AudioProcessing::kSampleRate48kHz; @@ -96,7 +81,7 @@ int SuitableProcessRate(int minimum_rate, return rate; } } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return uppermost_native_rate; } @@ -128,6 +113,13 @@ static const size_t kMaxAllowedValuesOfSamplesPerFrame = 480; // reverse and forward call numbers. static const size_t kMaxNumFramesToBuffer = 100; +void PackRenderAudioBufferForEchoDetector(const AudioBuffer& audio, + std::vector<float>& packed_buffer) { + packed_buffer.clear(); + packed_buffer.insert(packed_buffer.end(), audio.channels_const()[0], + audio.channels_const()[0] + audio.num_frames()); +} + } // namespace // Throughout webrtc, it's assumed that success is represented by zero.
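Since `Create()` now returns `rtc::scoped_refptr<AudioProcessing>` rather than a raw pointer, call sites hold a reference-counted handle. A minimal usage sketch; it assumes the builder's `SetConfig()` setter, mirrored by `AudioProcessingBuilderForTesting` in the tests further down, and the config values are illustrative only:

#include "api/scoped_refptr.h"
#include "modules/audio_processing/include/audio_processing.h"

rtc::scoped_refptr<webrtc::AudioProcessing> CreateConfiguredApm() {
  webrtc::AudioProcessing::Config config;
  config.echo_canceller.enabled = true;    // Illustrative settings.
  config.noise_suppression.enabled = true;
  // Lifetime is managed by the returned scoped_refptr; no manual delete.
  return webrtc::AudioProcessingBuilder().SetConfig(config).Create();
}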
@@ -144,38 +136,31 @@ AudioProcessingImpl::SubmoduleStates::SubmoduleStates( bool AudioProcessingImpl::SubmoduleStates::Update( bool high_pass_filter_enabled, bool mobile_echo_controller_enabled, - bool residual_echo_detector_enabled, bool noise_suppressor_enabled, bool adaptive_gain_controller_enabled, bool gain_controller2_enabled, bool gain_adjustment_enabled, bool echo_controller_enabled, - bool voice_detector_enabled, bool transient_suppressor_enabled) { bool changed = false; changed |= (high_pass_filter_enabled != high_pass_filter_enabled_); changed |= (mobile_echo_controller_enabled != mobile_echo_controller_enabled_); - changed |= - (residual_echo_detector_enabled != residual_echo_detector_enabled_); changed |= (noise_suppressor_enabled != noise_suppressor_enabled_); changed |= (adaptive_gain_controller_enabled != adaptive_gain_controller_enabled_); changed |= (gain_controller2_enabled != gain_controller2_enabled_); changed |= (gain_adjustment_enabled != gain_adjustment_enabled_); changed |= (echo_controller_enabled != echo_controller_enabled_); - changed |= (voice_detector_enabled != voice_detector_enabled_); changed |= (transient_suppressor_enabled != transient_suppressor_enabled_); if (changed) { high_pass_filter_enabled_ = high_pass_filter_enabled; mobile_echo_controller_enabled_ = mobile_echo_controller_enabled; - residual_echo_detector_enabled_ = residual_echo_detector_enabled; noise_suppressor_enabled_ = noise_suppressor_enabled; adaptive_gain_controller_enabled_ = adaptive_gain_controller_enabled; gain_controller2_enabled_ = gain_controller2_enabled; gain_adjustment_enabled_ = gain_adjustment_enabled; echo_controller_enabled_ = echo_controller_enabled; - voice_detector_enabled_ = voice_detector_enabled; transient_suppressor_enabled_ = transient_suppressor_enabled; } @@ -186,7 +171,7 @@ bool AudioProcessingImpl::SubmoduleStates::Update( bool AudioProcessingImpl::SubmoduleStates::CaptureMultiBandSubModulesActive() const { - return CaptureMultiBandProcessingPresent() || voice_detector_enabled_; + return CaptureMultiBandProcessingPresent(); } bool AudioProcessingImpl::SubmoduleStates::CaptureMultiBandProcessingPresent() @@ -233,8 +218,8 @@ bool AudioProcessingImpl::SubmoduleStates::HighPassFilteringRequired() const { noise_suppressor_enabled_; } -AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config) - : AudioProcessingImpl(config, +AudioProcessingImpl::AudioProcessingImpl() + : AudioProcessingImpl(/*config=*/{}, /*capture_post_processor=*/nullptr, /*render_pre_processor=*/nullptr, /*echo_control_factory=*/nullptr, @@ -244,7 +229,7 @@ AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config) int AudioProcessingImpl::instance_count_ = 0; AudioProcessingImpl::AudioProcessingImpl( - const webrtc::Config& config, + const AudioProcessing::Config& config, std::unique_ptr<CustomProcessing> capture_post_processor, std::unique_ptr<CustomProcessing> render_pre_processor, std::unique_ptr<EchoControlFactory> echo_control_factory, @@ -254,11 +239,14 @@ AudioProcessingImpl::AudioProcessingImpl( new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))), use_setup_specific_default_aec3_config_( UseSetupSpecificDefaultAec3Congfig()), + use_denormal_disabler_( + !field_trial::IsEnabled("WebRTC-ApmDenormalDisablerKillSwitch")), capture_runtime_settings_(RuntimeSettingQueueSize()), render_runtime_settings_(RuntimeSettingQueueSize()), capture_runtime_settings_enqueuer_(&capture_runtime_settings_), render_runtime_settings_enqueuer_(&render_runtime_settings_), echo_control_factory_(std::move(echo_control_factory)), +
config_(config), submodule_states_(!!capture_post_processor, !!render_pre_processor, !!capture_analyzer), @@ -284,33 +272,14 @@ AudioProcessingImpl::AudioProcessingImpl( << !!submodules_.capture_post_processor << "\nRender pre processor: " << !!submodules_.render_pre_processor; + RTC_LOG(LS_INFO) << "Denormal disabler: " + << (DenormalDisabler::IsSupported() ? "supported" : "unsupported"); // Mark Echo Controller enabled if a factory is injected. capture_nonlocked_.echo_controller_enabled = static_cast<bool>(echo_control_factory_); - // If no echo detector is injected, use the ResidualEchoDetector. - if (!submodules_.echo_detector) { - submodules_.echo_detector = rtc::make_ref_counted<ResidualEchoDetector>(); - } - -#if !(defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)) - // TODO(webrtc:5298): Remove once the use of ExperimentalNs has been - // deprecated. - config_.transient_suppression.enabled = config.Get<ExperimentalNs>().enabled; - - // TODO(webrtc:5298): Remove once the use of ExperimentalAgc has been - // deprecated. - config_.gain_controller1.analog_gain_controller.enabled = - config.Get<ExperimentalAgc>().enabled; - config_.gain_controller1.analog_gain_controller.startup_min_volume = - config.Get<ExperimentalAgc>().startup_min_volume; - config_.gain_controller1.analog_gain_controller.clipped_level_min = - config.Get<ExperimentalAgc>().clipped_level_min; - config_.gain_controller1.analog_gain_controller.enable_digital_adaptive = - !config.Get<ExperimentalAgc>().digital_adaptive_disabled; -#endif - Initialize(); } @@ -324,26 +293,6 @@ int AudioProcessingImpl::Initialize() { return kNoError; } -int AudioProcessingImpl::Initialize(int capture_input_sample_rate_hz, - int capture_output_sample_rate_hz, - int render_input_sample_rate_hz, - ChannelLayout capture_input_layout, - ChannelLayout capture_output_layout, - ChannelLayout render_input_layout) { - const ProcessingConfig processing_config = { - {{capture_input_sample_rate_hz, ChannelsFromLayout(capture_input_layout), - LayoutHasKeyboard(capture_input_layout)}, - {capture_output_sample_rate_hz, - ChannelsFromLayout(capture_output_layout), - LayoutHasKeyboard(capture_output_layout)}, - {render_input_sample_rate_hz, ChannelsFromLayout(render_input_layout), - LayoutHasKeyboard(render_input_layout)}, - {render_input_sample_rate_hz, ChannelsFromLayout(render_input_layout), - LayoutHasKeyboard(render_input_layout)}}}; - - return Initialize(processing_config); -} - int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { // Run in a single-threaded manner during initialization.
MutexLock lock_render(&mutex_render_); @@ -419,10 +368,9 @@ void AudioProcessingImpl::InitializeLocked() { InitializeGainController1(); InitializeTransientSuppressor(); InitializeHighPassFilter(true); - InitializeVoiceDetector(); InitializeResidualEchoDetector(); InitializeEchoController(); - InitializeGainController2(); + InitializeGainController2(/*config_has_changed=*/true); InitializeNoiseSuppressor(); InitializeAnalyzer(); InitializePostProcessor(); @@ -554,9 +502,6 @@ void AudioProcessingImpl::ApplyConfig(const AudioProcessing::Config& config) { const bool agc2_config_changed = config_.gain_controller2 != config.gain_controller2; - const bool voice_detection_config_changed = - config_.voice_detection.enabled != config.voice_detection.enabled; - const bool ns_config_changed = config_.noise_suppression.enabled != config.noise_suppression.enabled || config_.noise_suppression.level != config.noise_suppression.level; @@ -599,22 +544,12 @@ void AudioProcessingImpl::ApplyConfig(const AudioProcessing::Config& config) { config_.gain_controller2 = AudioProcessing::Config::GainController2(); } - if (agc2_config_changed) { - InitializeGainController2(); - } + InitializeGainController2(agc2_config_changed); if (pre_amplifier_config_changed || gain_adjustment_config_changed) { InitializeCaptureLevelsAdjuster(); } - if (config_.level_estimation.enabled && !submodules_.output_level_estimator) { - submodules_.output_level_estimator = std::make_unique<LevelEstimator>(); - } - - if (voice_detection_config_changed) { - InitializeVoiceDetector(); - } - // Reinitialization must happen after all submodule configuration to avoid // additional reinitializations on the next capture / render processing call. if (pipeline_config_changed) { @@ -718,12 +653,12 @@ bool AudioProcessingImpl::PostRuntimeSetting(RuntimeSetting setting) { return enqueueing_successful; } case RuntimeSetting::Type::kNotSpecified: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return true; } // The language allows the enum to have a non-enumerator // value. Check that this doesn't happen.
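The `ApplyConfig()` change above moves the changed/unchanged decision into `InitializeGainController2()` itself. A usage sketch, assuming `apm` is an initialized `AudioProcessing` instance:

// Toggling AGC2 through the public config API.
webrtc::AudioProcessing::Config cfg = apm->GetConfig();
cfg.gain_controller2.enabled = true;
cfg.gain_controller2.fixed_digital.gain_db = 6.0f;  // Illustrative value.
apm->ApplyConfig(cfg);  // AGC2 config differs: the submodule is (re)created.
apm->ApplyConfig(cfg);  // No AGC2 change: InitializeGainController2(false)
                        // returns immediately and keeps the submodule.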
- RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return true; } @@ -791,12 +726,12 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, RETURN_ON_ERR(MaybeInitializeCapture(input_config, output_config)); MutexLock lock_capture(&mutex_capture_); + DenormalDisabler denormal_disabler(use_denormal_disabler_); if (aec_dump_) { RecordUnprocessedCaptureStream(src); } - capture_.keyboard_info.Extract(src, formats_.api_format.input_stream()); capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); if (capture_.capture_fullband_audio) { capture_.capture_fullband_audio->CopyFrom( @@ -882,7 +817,7 @@ void AudioProcessingImpl::HandleCaptureRuntimeSettings() { float value; setting.GetFloat(&value); config_.gain_controller2.fixed_digital.gain_db = value; - submodules_.gain_controller2->ApplyConfig(config_.gain_controller2); + submodules_.gain_controller2->SetFixedGainDb(value); } break; } @@ -893,13 +828,13 @@ void AudioProcessingImpl::HandleCaptureRuntimeSettings() { break; } case RuntimeSetting::Type::kPlayoutAudioDeviceChange: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; case RuntimeSetting::Type::kCustomRenderProcessingRuntimeSetting: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; case RuntimeSetting::Type::kNotSpecified: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; case RuntimeSetting::Type::kCaptureOutputUsed: bool value; @@ -943,7 +878,7 @@ void AudioProcessingImpl::HandleRenderRuntimeSettings() { case RuntimeSetting::Type::kCaptureFixedPostGain: // fall-through case RuntimeSetting::Type::kCaptureOutputUsed: // fall-through case RuntimeSetting::Type::kNotSpecified: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; } } @@ -984,16 +919,18 @@ void AudioProcessingImpl::QueueBandedRenderAudio(AudioBuffer* audio) { } void AudioProcessingImpl::QueueNonbandedRenderAudio(AudioBuffer* audio) { - ResidualEchoDetector::PackRenderAudioBuffer(audio, &red_render_queue_buffer_); - - // Insert the samples into the queue. - if (!red_render_signal_queue_->Insert(&red_render_queue_buffer_)) { - // The data queue is full and needs to be emptied. - EmptyQueuedRenderAudio(); + if (submodules_.echo_detector) { + PackRenderAudioBufferForEchoDetector(*audio, red_render_queue_buffer_); + RTC_DCHECK(red_render_signal_queue_); + // Insert the samples into the queue. + if (!red_render_signal_queue_->Insert(&red_render_queue_buffer_)) { + // The data queue is full and needs to be emptied. + EmptyQueuedRenderAudio(); - // Retry the insert (should always work). - bool result = red_render_signal_queue_->Insert(&red_render_queue_buffer_); - RTC_DCHECK(result); + // Retry the insert (should always work). 
+ bool result = red_render_signal_queue_->Insert(&red_render_queue_buffer_); + RTC_DCHECK(result); + } } } @@ -1026,23 +963,26 @@ void AudioProcessingImpl::AllocateRenderQueue() { agc_render_signal_queue_->Clear(); } - if (red_render_queue_element_max_size_ < - new_red_render_queue_element_max_size) { - red_render_queue_element_max_size_ = new_red_render_queue_element_max_size; + if (submodules_.echo_detector) { + if (red_render_queue_element_max_size_ < + new_red_render_queue_element_max_size) { + red_render_queue_element_max_size_ = + new_red_render_queue_element_max_size; - std::vector<float> template_queue_element( - red_render_queue_element_max_size_); + std::vector<float> template_queue_element( + red_render_queue_element_max_size_); - red_render_signal_queue_.reset( - new SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>( - kMaxNumFramesToBuffer, template_queue_element, - RenderQueueItemVerifier<float>( - red_render_queue_element_max_size_))); + red_render_signal_queue_.reset( + new SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>( + kMaxNumFramesToBuffer, template_queue_element, + RenderQueueItemVerifier<float>( + red_render_queue_element_max_size_))); - red_render_queue_buffer_.resize(red_render_queue_element_max_size_); - red_capture_queue_buffer_.resize(red_render_queue_element_max_size_); - } else { - red_render_signal_queue_->Clear(); + red_render_queue_buffer_.resize(red_render_queue_element_max_size_); + red_capture_queue_buffer_.resize(red_render_queue_element_max_size_); + } else { + red_render_signal_queue_->Clear(); + } } } @@ -1066,9 +1006,10 @@ void AudioProcessingImpl::EmptyQueuedRenderAudioLocked() { } } - while (red_render_signal_queue_->Remove(&red_capture_queue_buffer_)) { - RTC_DCHECK(submodules_.echo_detector); - submodules_.echo_detector->AnalyzeRenderAudio(red_capture_queue_buffer_); + if (submodules_.echo_detector) { + while (red_render_signal_queue_->Remove(&red_capture_queue_buffer_)) { + submodules_.echo_detector->AnalyzeRenderAudio(red_capture_queue_buffer_); + } } } @@ -1080,6 +1021,7 @@ int AudioProcessingImpl::ProcessStream(const int16_t* const src, RETURN_ON_ERR(MaybeInitializeCapture(input_config, output_config)); MutexLock lock_capture(&mutex_capture_); + DenormalDisabler denormal_disabler(use_denormal_disabler_); if (aec_dump_) { RecordUnprocessedCaptureStream(src, input_config); @@ -1109,6 +1051,7 @@ int AudioProcessingImpl::ProcessStream(const int16_t* const src, int AudioProcessingImpl::ProcessCaptureStreamLocked() { EmptyQueuedRenderAudioLocked(); HandleCaptureRuntimeSettings(); + DenormalDisabler denormal_disabler(use_denormal_disabler_); // Ensure that not both the AEC and AECM are active at the same time. // TODO(peah): Simplify once the public API Enable functions for these @@ -1156,13 +1099,16 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() { levels.peak, 1, RmsLevel::kMinLevelDb, 64); } + // Detect an analog gain change. + int analog_mic_level = recommended_stream_analog_level_locked(); + const bool analog_mic_level_changed = + capture_.prev_analog_mic_level != analog_mic_level && + capture_.prev_analog_mic_level != -1; + capture_.prev_analog_mic_level = analog_mic_level; + analog_gain_stats_reporter_.UpdateStatistics(analog_mic_level); + if (submodules_.echo_controller) { - // Detect and flag any change in the analog gain.
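The new code above detects analog gain changes unconditionally, using -1 as the "no previous level" sentinel, and feeds every level into the new `AnalogGainStatsReporter`. A sketch of the caller-side contract around these levels:

// Typical capture-side loop (values and device mapping are illustrative):
apm->set_stream_analog_level(mic_level);  // Report the level in effect.
apm->ProcessStream(channels, stream_config, stream_config, channels);
mic_level = apm->recommended_stream_analog_level();  // Apply for next frame.
// Internally, the first frame sees prev_analog_mic_level == -1, so no gain
// change is flagged until a real previous level exists.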
- int analog_mic_level = recommended_stream_analog_level_locked(); - capture_.echo_path_gain_change = - capture_.prev_analog_mic_level != analog_mic_level && - capture_.prev_analog_mic_level != -1; - capture_.prev_analog_mic_level = analog_mic_level; + capture_.echo_path_gain_change = analog_mic_level_changed; // Detect and flag any change in the capture level adjustment pre-gain. if (submodules_.capture_levels_adjuster) { @@ -1258,13 -1204,6 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() { } } - if (config_.voice_detection.enabled) { - capture_.stats.voice_detected = - submodules_.voice_detector->ProcessCaptureAudio(capture_buffer); - } else { - capture_.stats.voice_detected = absl::nullopt; - } - if (submodules_.agc_manager) { submodules_.agc_manager->Process(capture_buffer); @@ -1287,7 +1226,6 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() { capture_buffer->MergeFrequencyBands(); } - capture_.stats.output_rms_dbfs = absl::nullopt; if (capture_.capture_output_used) { if (capture_.capture_fullband_audio) { const auto& ec = submodules_.echo_controller; @@ -1300,8 +1238,7 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() { capture_buffer = capture_.capture_fullband_audio.get(); } - if (config_.residual_echo_detector.enabled) { - RTC_DCHECK(submodules_.echo_detector); + if (submodules_.echo_detector) { submodules_.echo_detector->AnalyzeCaptureAudio( rtc::ArrayView<const float>(capture_buffer->channels()[0], capture_buffer->num_frames())); @@ -1320,12 +1257,11 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() { capture_buffer->num_channels(), capture_buffer->split_bands_const(0)[kBand0To8kHz], capture_buffer->num_frames_per_band(), - capture_.keyboard_info.keyboard_data, - capture_.keyboard_info.num_keyboard_frames, voice_probability, + /*reference_data=*/nullptr, /*reference_length=*/0, voice_probability, capture_.key_pressed); } - // Experimental APM sub-module that analyzes |capture_buffer|. + // Experimental APM sub-module that analyzes `capture_buffer`. if (submodules_.capture_analyzer) { submodules_.capture_analyzer->Analyze(capture_buffer); } @@ -1340,13 +1276,6 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() { submodules_.capture_post_processor->Process(capture_buffer); } - // The level estimator operates on the recombined data. - if (config_.level_estimation.enabled) { - submodules_.output_level_estimator->ProcessStream(*capture_buffer); - capture_.stats.output_rms_dbfs = - submodules_.output_level_estimator->RMS(); - } - capture_output_rms_.Analyze(rtc::ArrayView<const float>( capture_buffer->channels_const()[0], capture_nonlocked_.capture_processing_format.num_frames())); @@ -1366,8 +1295,7 @@ int AudioProcessingImpl::ProcessCaptureStreamLocked() { } // Compute echo-detector stats.
- if (config_.residual_echo_detector.enabled) { - RTC_DCHECK(submodules_.echo_detector); + if (submodules_.echo_detector) { auto ed_metrics = submodules_.echo_detector->GetMetrics(); capture_.stats.residual_echo_likelihood = ed_metrics.echo_likelihood; capture_.stats.residual_echo_likelihood_recent_max = @@ -1436,6 +1364,8 @@ int AudioProcessingImpl::ProcessReverseStream(const float* const* src, float* const* dest) { TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_StreamConfig"); MutexLock lock(&mutex_render_); + DenormalDisabler denormal_disabler(use_denormal_disabler_); + RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, input_config, output_config)); if (submodule_states_.RenderMultiBandProcessingActive() || submodule_states_.RenderFullBandProcessingActive()) { @@ -1473,6 +1403,8 @@ int AudioProcessingImpl::AnalyzeReverseStreamLocked( RTC_DCHECK_EQ(input_config.num_frames(), formats_.api_format.reverse_input_stream().num_frames()); + DenormalDisabler denormal_disabler(use_denormal_disabler_); + if (aec_dump_) { const size_t channel_size = formats_.api_format.reverse_input_stream().num_frames(); @@ -1497,6 +1429,8 @@ int AudioProcessingImpl::ProcessReverseStream(const int16_t* const src, } MutexLock lock(&mutex_render_); + DenormalDisabler denormal_disabler(use_denormal_disabler_); + ProcessingConfig processing_config = formats_.api_format; processing_config.reverse_input_stream().set_sample_rate_hz( input_config.sample_rate_hz()); @@ -1531,6 +1465,7 @@ int AudioProcessingImpl::ProcessRenderStreamLocked() { AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. HandleRenderRuntimeSettings(); + DenormalDisabler denormal_disabler(use_denormal_disabler_); if (submodules_.render_pre_processor) { submodules_.render_pre_processor->Process(render_buffer); @@ -1603,7 +1538,7 @@ bool AudioProcessingImpl::GetLinearAecOutput( return true; } RTC_LOG(LS_ERROR) << "No linear AEC output available"; - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return false; } @@ -1725,11 +1660,11 @@ AudioProcessing::Config AudioProcessingImpl::GetConfig() const { bool AudioProcessingImpl::UpdateActiveSubmoduleStates() { return submodule_states_.Update( config_.high_pass_filter.enabled, !!submodules_.echo_control_mobile, - config_.residual_echo_detector.enabled, !!submodules_.noise_suppressor, - !!submodules_.gain_control, !!submodules_.gain_controller2, + !!submodules_.noise_suppressor, !!submodules_.gain_control, + !!submodules_.gain_controller2, config_.pre_amplifier.enabled || config_.capture_level_adjustment.enabled, capture_nonlocked_.echo_controller_enabled, - config_.voice_detection.enabled, !!submodules_.transient_suppressor); + !!submodules_.transient_suppressor); } void AudioProcessingImpl::InitializeTransientSuppressor() { @@ -1779,14 +1714,6 @@ void AudioProcessingImpl::InitializeHighPassFilter(bool forced_reset) { } } -void AudioProcessingImpl::InitializeVoiceDetector() { - if (config_.voice_detection.enabled) { - submodules_.voice_detector = std::make_unique<VoiceDetection>( - proc_split_sample_rate_hz(), VoiceDetection::kVeryLowLikelihood); - } else { - submodules_.voice_detector.reset(); - } -} void AudioProcessingImpl::InitializeEchoController() { bool use_echo_controller = echo_control_factory_ || @@ -1879,7 +1806,6 @@ void AudioProcessingImpl::InitializeGainController1() { submodules_.gain_control->Initialize(num_proc_channels(), proc_sample_rate_hz()); - if (!config_.gain_controller1.analog_gain_controller.enabled) { int error = submodules_.gain_control->set_mode(
Agc1ConfigModeToInterfaceMode(config_.gain_controller1.mode)); @@ -1893,9 +1819,10 @@ error = submodules_.gain_control->enable_limiter( config_.gain_controller1.enable_limiter); RTC_DCHECK_EQ(kNoError, error); + constexpr int kAnalogLevelMinimum = 0; + constexpr int kAnalogLevelMaximum = 255; error = submodules_.gain_control->set_analog_level_limits( - config_.gain_controller1.analog_level_minimum, - config_.gain_controller1.analog_level_maximum); + kAnalogLevelMinimum, kAnalogLevelMaximum); RTC_DCHECK_EQ(kNoError, error); submodules_.agc_manager.reset(); @@ -1904,9 +1831,7 @@ if (!submodules_.agc_manager.get() || submodules_.agc_manager->num_channels() != - static_cast<int>(num_proc_channels()) || - submodules_.agc_manager->sample_rate_hz() != - capture_nonlocked_.split_rate) { + static_cast<int>(num_proc_channels())) { int stream_analog_level = -1; const bool re_creation = !!submodules_.agc_manager; if (re_creation) { @@ -1918,7 +1843,6 @@ config_.gain_controller1.analog_gain_controller.clipped_level_min, !config_.gain_controller1.analog_gain_controller .enable_digital_adaptive, - capture_nonlocked_.split_rate, config_.gain_controller1.analog_gain_controller.clipped_level_step, config_.gain_controller1.analog_gain_controller.clipped_ratio_threshold, config_.gain_controller1.analog_gain_controller.clipped_wait_frames, @@ -1934,19 +1858,18 @@ capture_.capture_output_used); } -void AudioProcessingImpl::InitializeGainController2() { - if (config_.gain_controller2.enabled) { - if (!submodules_.gain_controller2) { - // TODO(alessiob): Move the injected gain controller once injection is - // implemented.
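The `DenormalDisabler` instances added throughout the capture and render paths above guard the float-heavy sections. A sketch of the RAII pattern (the actual FP-state handling lives in `system_wrappers/include/denormal_disabler.h`; this is not the real implementation):

#include "system_wrappers/include/denormal_disabler.h"

void ProcessBlock() {
  // When enabled and supported, switches the FPU to flush-to-zero /
  // denormals-are-zero style behavior for this scope and restores the
  // previous state on destruction; otherwise it is a no-op.
  webrtc::DenormalDisabler denormal_disabler(/*enabled=*/true);
  // ... floating-point heavy audio processing ...
}  // Previous FP state restored here.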
- submodules_.gain_controller2.reset(new GainController2()); - } - - submodules_.gain_controller2->Initialize(proc_fullband_sample_rate_hz(), - num_input_channels()); - submodules_.gain_controller2->ApplyConfig(config_.gain_controller2); - } else { +void AudioProcessingImpl::InitializeGainController2(bool config_has_changed) { + if (!config_has_changed) { + return; + } + if (!config_.gain_controller2.enabled) { submodules_.gain_controller2.reset(); + return; + } + if (!submodules_.gain_controller2 || config_has_changed) { + submodules_.gain_controller2 = std::make_unique<GainController2>( + config_.gain_controller2, proc_fullband_sample_rate_hz(), + num_input_channels()); } } @@ -2003,10 +1926,11 @@ void AudioProcessingImpl::InitializeCaptureLevelsAdjuster() { } void AudioProcessingImpl::InitializeResidualEchoDetector() { - RTC_DCHECK(submodules_.echo_detector); - submodules_.echo_detector->Initialize( - proc_fullband_sample_rate_hz(), 1, - formats_.render_processing_format.sample_rate_hz(), 1); + if (submodules_.echo_detector) { + submodules_.echo_detector->Initialize( + proc_fullband_sample_rate_hz(), 1, + formats_.render_processing_format.sample_rate_hz(), 1); + } } void AudioProcessingImpl::InitializeAnalyzer() { @@ -2171,17 +2095,6 @@ AudioProcessingImpl::ApmCaptureState::ApmCaptureState() AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; -void AudioProcessingImpl::ApmCaptureState::KeyboardInfo::Extract( - const float* const* data, - const StreamConfig& stream_config) { - if (stream_config.has_keyboard()) { - keyboard_data = data[stream_config.num_channels()]; - } else { - keyboard_data = NULL; - } - num_keyboard_frames = stream_config.num_frames(); -} - AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; diff --git a/modules/audio_processing/audio_processing_impl.h b/modules/audio_processing/audio_processing_impl.h index c88cfcde92..344b8c5959 100644 --- a/modules/audio_processing/audio_processing_impl.h +++ b/modules/audio_processing/audio_processing_impl.h @@ -18,9 +18,11 @@ #include <string> #include <vector> +#include "api/array_view.h" #include "api/function_view.h" #include "modules/audio_processing/aec3/echo_canceller3.h" #include "modules/audio_processing/agc/agc_manager_direct.h" +#include "modules/audio_processing/agc/analog_gain_stats_reporter.h" #include "modules/audio_processing/agc/gain_control.h" #include "modules/audio_processing/audio_buffer.h" #include "modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.h" @@ -32,14 +34,11 @@ #include "modules/audio_processing/include/audio_frame_proxies.h" #include "modules/audio_processing/include/audio_processing.h" #include "modules/audio_processing/include/audio_processing_statistics.h" -#include "modules/audio_processing/level_estimator.h" #include "modules/audio_processing/ns/noise_suppressor.h" #include "modules/audio_processing/optionally_built_submodule_creators.h" #include "modules/audio_processing/render_queue_item_verifier.h" -#include "modules/audio_processing/residual_echo_detector.h" #include "modules/audio_processing/rms_level.h" #include "modules/audio_processing/transient/transient_suppressor.h" -#include "modules/audio_processing/voice_detection.h" #include "rtc_base/gtest_prod_util.h" #include "rtc_base/ignore_wundef.h" #include "rtc_base/swap_queue.h" @@ -51,13 +50,16 @@ namespace webrtc { class ApmDataDumper; class AudioConverter; +constexpr int RuntimeSettingQueueSize() { + return 100; +} + class AudioProcessingImpl : public
AudioProcessing { public: // Methods forcing APM to run in a single-threaded manner. // Acquires both the render and capture locks. - explicit AudioProcessingImpl(const webrtc::Config& config); - // AudioProcessingImpl takes ownership of capture post processor. - AudioProcessingImpl(const webrtc::Config& config, + AudioProcessingImpl(); + AudioProcessingImpl(const AudioProcessing::Config& config, std::unique_ptr<CustomProcessing> capture_post_processor, std::unique_ptr<CustomProcessing> render_pre_processor, std::unique_ptr<EchoControlFactory> echo_control_factory, @@ -65,12 +67,6 @@ class AudioProcessingImpl : public AudioProcessing { std::unique_ptr<CustomAudioAnalyzer> capture_analyzer); ~AudioProcessingImpl() override; int Initialize() override; - int Initialize(int capture_input_sample_rate_hz, - int capture_output_sample_rate_hz, - int render_sample_rate_hz, - ChannelLayout capture_input_layout, - ChannelLayout capture_output_layout, - ChannelLayout render_input_layout) override; int Initialize(const ProcessingConfig& processing_config) override; void ApplyConfig(const AudioProcessing::Config& config) override; bool CreateAndAttachAecDump(const std::string& file_name, @@ -143,6 +139,11 @@ class AudioProcessingImpl : public AudioProcessing { // Overridden in a mock. virtual void InitializeLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_, mutex_capture_); + void AssertLockedForTest() + RTC_ASSERT_EXCLUSIVE_LOCK(mutex_render_, mutex_capture_) { + mutex_render_.AssertHeld(); + mutex_capture_.AssertHeld(); + } private: // TODO(peah): These friend classes should be removed as soon as the new @@ -164,7 +165,7 @@ const ApmSubmoduleCreationOverrides& overrides); // Class providing thread-safe message pipe functionality for - // |runtime_settings_|. + // `runtime_settings_`. class RuntimeSettingEnqueuer { public: explicit RuntimeSettingEnqueuer( @@ -178,10 +179,12 @@ SwapQueue<RuntimeSetting>& runtime_settings_; }; - std::unique_ptr<ApmDataDumper> data_dumper_; + const std::unique_ptr<ApmDataDumper> data_dumper_; static int instance_count_; const bool use_setup_specific_default_aec3_config_; + const bool use_denormal_disabler_; + SwapQueue<RuntimeSetting> capture_runtime_settings_; SwapQueue<RuntimeSetting> render_runtime_settings_; @@ -189,7 +192,7 @@ RuntimeSettingEnqueuer capture_runtime_settings_enqueuer_; RuntimeSettingEnqueuer render_runtime_settings_enqueuer_; // EchoControl factory. - std::unique_ptr<EchoControlFactory> echo_control_factory_; + const std::unique_ptr<EchoControlFactory> echo_control_factory_; class SubmoduleStates { public: @@ -199,13 +202,11 @@ // Updates the submodule state and returns true if it has changed.
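Runtime settings travel through fixed-size `SwapQueue`s sized by `RuntimeSettingQueueSize()` (100 slots) and are drained on the audio threads. A producer-side sketch using the public API:

// Post a capture pre-gain update; it is applied on the next ProcessStream()
// call via HandleCaptureRuntimeSettings().
apm->SetRuntimeSetting(
    webrtc::AudioProcessing::RuntimeSetting::CreateCapturePreGain(1.5f));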
bool Update(bool high_pass_filter_enabled, bool mobile_echo_controller_enabled, - bool residual_echo_detector_enabled, bool noise_suppressor_enabled, bool adaptive_gain_controller_enabled, bool gain_controller2_enabled, bool gain_adjustment_enabled, bool echo_controller_enabled, - bool voice_detector_enabled, bool transient_suppressor_enabled); bool CaptureMultiBandSubModulesActive() const; bool CaptureMultiBandProcessingPresent() const; @@ -223,13 +224,11 @@ class AudioProcessingImpl : public AudioProcessing { const bool capture_analyzer_enabled_ = false; bool high_pass_filter_enabled_ = false; bool mobile_echo_controller_enabled_ = false; - bool residual_echo_detector_enabled_ = false; bool noise_suppressor_enabled_ = false; bool adaptive_gain_controller_enabled_ = false; bool gain_controller2_enabled_ = false; bool gain_adjustment_enabled_ = false; bool echo_controller_enabled_ = false; - bool voice_detector_enabled_ = false; bool transient_suppressor_enabled_ = false; bool first_update_ = true; }; @@ -261,15 +260,17 @@ class AudioProcessingImpl : public AudioProcessing { void InitializeEchoController() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_render_, mutex_capture_); - // Initializations of capture-only submodules, requiring the capture lock + // Initializations of capture-only sub-modules, requiring the capture lock // already acquired. void InitializeHighPassFilter(bool forced_reset) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_); - void InitializeVoiceDetector() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_); void InitializeGainController1() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_); void InitializeTransientSuppressor() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_); - void InitializeGainController2() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_); + // Initializes the `GainController2` sub-module. If the sub-module is enabled + // and `config_has_changed` is true, recreates the sub-module. + void InitializeGainController2(bool config_has_changed) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_); void InitializeNoiseSuppressor() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_); void InitializeCaptureLevelsAdjuster() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_); @@ -315,8 +316,8 @@ class AudioProcessingImpl : public AudioProcessing { // Collects configuration settings from public and private // submodules to be saved as an audioproc::Config message on the - // AecDump if it is attached. If not |forced|, only writes the current - // config if it is different from the last saved one; if |forced|, + // AecDump if it is attached. If not `forced`, only writes the current + // config if it is different from the last saved one; if `forced`, // writes the config regardless of the last saved. void WriteAecDumpConfigMessage(bool forced) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_capture_); @@ -383,20 +384,18 @@ class AudioProcessingImpl : public AudioProcessing { render_pre_processor(std::move(render_pre_processor)), capture_analyzer(std::move(capture_analyzer)) {} // Accessed internally from capture or during initialization. 
+ const rtc::scoped_refptr<EchoDetector> echo_detector; + const std::unique_ptr<CustomProcessing> capture_post_processor; + const std::unique_ptr<CustomProcessing> render_pre_processor; + const std::unique_ptr<CustomAudioAnalyzer> capture_analyzer; std::unique_ptr<AgcManagerDirect> agc_manager; std::unique_ptr<GainControlImpl> gain_control; std::unique_ptr<GainController2> gain_controller2; std::unique_ptr<HighPassFilter> high_pass_filter; - rtc::scoped_refptr<EchoDetector> echo_detector; std::unique_ptr<EchoControl> echo_controller; std::unique_ptr<EchoControlMobileImpl> echo_control_mobile; std::unique_ptr<NoiseSuppressor> noise_suppressor; std::unique_ptr<TransientSuppressor> transient_suppressor; - std::unique_ptr<CustomProcessing> capture_post_processor; - std::unique_ptr<CustomProcessing> render_pre_processor; - std::unique_ptr<CustomAudioAnalyzer> capture_analyzer; - std::unique_ptr<LevelEstimator> output_level_estimator; - std::unique_ptr<VoiceDetection> voice_detector; std::unique_ptr<CaptureLevelsAdjuster> capture_levels_adjuster; } submodules_; @@ -408,10 +407,10 @@ struct ApmFormatState { ApmFormatState() : // Format of processing streams at input/output call sites. - api_format({{{kSampleRate16kHz, 1, false}, - {kSampleRate16kHz, 1, false}, - {kSampleRate16kHz, 1, false}, - {kSampleRate16kHz, 1, false}}}), + api_format({{{kSampleRate16kHz, 1}, + {kSampleRate16kHz, 1}, + {kSampleRate16kHz, 1}, + {kSampleRate16kHz, 1}}}), render_processing_format(kSampleRate16kHz, 1) {} ProcessingConfig api_format; StreamConfig render_processing_format; @@ -458,11 +457,6 @@ int playout_volume; int prev_playout_volume; AudioProcessingStats stats; - struct KeyboardInfo { - void Extract(const float* const* data, const StreamConfig& stream_config); - size_t num_keyboard_frames = 0; - const float* keyboard_data = nullptr; - } keyboard_info; int cached_stream_analog_level_ = 0; } capture_ RTC_GUARDED_BY(mutex_capture_); @@ -524,6 +518,9 @@ RmsLevel capture_output_rms_ RTC_GUARDED_BY(mutex_capture_); int capture_rms_interval_counter_ RTC_GUARDED_BY(mutex_capture_) = 0; + AnalogGainStatsReporter analog_gain_stats_reporter_ + RTC_GUARDED_BY(mutex_capture_); + // Lock protection not needed. std::unique_ptr< SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>> diff --git a/modules/audio_processing/audio_processing_impl_locking_unittest.cc b/modules/audio_processing/audio_processing_impl_locking_unittest.cc index 66c1251d4c..7557e919d6 100644 --- a/modules/audio_processing/audio_processing_impl_locking_unittest.cc +++ b/modules/audio_processing/audio_processing_impl_locking_unittest.cc @@ -24,9 +24,11 @@ #include "test/gtest.h" namespace webrtc { - namespace { +constexpr int kMaxFrameSize = 480; +constexpr int kTestTimeOutLimit = 10 * 60 * 1000; + class AudioProcessingImplLockTest; // Type of the render thread APM API call to use in the test.
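The `AssertLockedForTest()` helper added to `AudioProcessingImpl` above leans on Clang thread-safety annotations instead of disabling the analysis. A minimal sketch of the pattern (hypothetical class, real WebRTC macros):

#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/thread_annotations.h"

class Counter {
 public:
  void AssertLockedForTest() RTC_ASSERT_EXCLUSIVE_LOCK(mutex_) {
    mutex_.AssertHeld();  // Dies if the caller does not hold the lock.
  }
  void Increment() {
    webrtc::MutexLock lock(&mutex_);
    ++value_;
  }

 private:
  webrtc::Mutex mutex_;
  int value_ RTC_GUARDED_BY(mutex_) = 0;
};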
@@ -305,14 +307,14 @@ class CaptureProcessor { rtc::Event* render_call_event, rtc::Event* capture_call_event, FrameCounters* shared_counters_state, - TestConfig* test_config, + const TestConfig* test_config, AudioProcessing* apm); void Process(); private: - static const int kMaxCallDifference = 10; - static const float kCaptureInputFloatLevel; - static const int kCaptureInputFixLevel = 1024; + static constexpr int kMaxCallDifference = 10; + static constexpr float kCaptureInputFloatLevel = 0.03125f; + static constexpr int kCaptureInputFixLevel = 1024; void PrepareFrame(); void CallApmCaptureSide(); @@ -331,13 +333,13 @@ class StatsProcessor { public: StatsProcessor(RandomGenerator* rand_gen, - TestConfig* test_config, + const TestConfig* test_config, AudioProcessing* apm); void Process(); private: RandomGenerator* rand_gen_ = nullptr; - TestConfig* test_config_ = nullptr; + const TestConfig* const test_config_ = nullptr; AudioProcessing* apm_ = nullptr; }; @@ -349,14 +351,14 @@ class RenderProcessor { rtc::Event* render_call_event, rtc::Event* capture_call_event, FrameCounters* shared_counters_state, - TestConfig* test_config, + const TestConfig* test_config, AudioProcessing* apm); void Process(); private: - static const int kMaxCallDifference = 10; - static const int kRenderInputFixLevel = 16384; - static const float kRenderInputFloatLevel; + static constexpr int kMaxCallDifference = 10; + static constexpr int kRenderInputFixLevel = 16384; + static constexpr float kRenderInputFloatLevel = 0.5f; void PrepareFrame(); void CallApmRenderSide(); @@ -380,10 +382,6 @@ class AudioProcessingImplLockTest bool MaybeEndTest(); private: - static const int kTestTimeOutLimit = 10 * 60 * 1000; - static const int kMaxFrameSize = 480; - - // ::testing::TestWithParam<> implementation void SetUp() override; void TearDown() override; @@ -428,8 +426,8 @@ class AudioProcessingImplLockTest // Thread related variables. mutable RandomGenerator rand_gen_; - std::unique_ptr<AudioProcessing> apm_; - TestConfig test_config_; + const TestConfig test_config_; + rtc::scoped_refptr<AudioProcessing> apm_; FrameCounters frame_counters_; RenderProcessor render_thread_state_; CaptureProcessor capture_thread_state_; @@ -476,8 +474,23 @@ void PopulateAudioFrame(float amplitude, } } +AudioProcessing::Config GetApmTestConfig(AecType aec_type) { + AudioProcessing::Config apm_config; + apm_config.echo_canceller.enabled = aec_type != AecType::AecTurnedOff; + apm_config.echo_canceller.mobile_mode = + aec_type == AecType::BasicWebRtcAecSettingsWithAecMobile; + apm_config.gain_controller1.enabled = true; + apm_config.gain_controller1.mode = + AudioProcessing::Config::GainController1::kAdaptiveDigital; + apm_config.noise_suppression.enabled = true; + return apm_config; +} + AudioProcessingImplLockTest::AudioProcessingImplLockTest() - : apm_(AudioProcessingBuilderForTesting().Create()), + : test_config_(GetParam()), + apm_(AudioProcessingBuilderForTesting() + .SetConfig(GetApmTestConfig(test_config_.aec_type)) + .Create()), render_thread_state_(kMaxFrameSize, &rand_gen_, &render_call_event_, @@ -508,23 +521,7 @@ bool AudioProcessingImplLockTest::MaybeEndTest() { return false; } -// Setup of test and APM.
-void AudioProcessingImplLockTest::SetUp() { - test_config_ = static_cast<TestConfig>(GetParam()); - - AudioProcessing::Config apm_config = apm_->GetConfig(); - apm_config.echo_canceller.enabled = - (test_config_.aec_type != AecType::AecTurnedOff); - apm_config.echo_canceller.mobile_mode = - (test_config_.aec_type == AecType::BasicWebRtcAecSettingsWithAecMobile); - apm_config.gain_controller1.enabled = true; - apm_config.gain_controller1.mode = - AudioProcessing::Config::GainController1::kAdaptiveDigital; - apm_config.noise_suppression.enabled = true; - apm_config.voice_detection.enabled = true; - apm_config.level_estimation.enabled = true; - apm_->ApplyConfig(apm_config); -} +void AudioProcessingImplLockTest::SetUp() {} void AudioProcessingImplLockTest::TearDown() { render_call_event_.Set(); @@ -532,7 +529,7 @@ } StatsProcessor::StatsProcessor(RandomGenerator* rand_gen, - TestConfig* test_config, + const TestConfig* test_config, AudioProcessing* apm) : rand_gen_(rand_gen), test_config_(test_config), apm_(apm) {} @@ -557,14 +554,12 @@ apm_->GetStatistics(); } -const float CaptureProcessor::kCaptureInputFloatLevel = 0.03125f; - CaptureProcessor::CaptureProcessor(int max_frame_size, RandomGenerator* rand_gen, rtc::Event* render_call_event, rtc::Event* capture_call_event, FrameCounters* shared_counters_state, - TestConfig* test_config, + const TestConfig* test_config, AudioProcessing* apm) : rand_gen_(rand_gen), render_call_event_(render_call_event), @@ -611,8 +606,7 @@ void CaptureProcessor::PrepareFrame() { // Prepare the audio data. StreamConfig input_stream_config(frame_data_.input_sample_rate_hz, - frame_data_.input_number_of_channels, - /*has_keyboard=*/false); + frame_data_.input_number_of_channels); PopulateAudioFrame(kCaptureInputFixLevel, input_stream_config.num_channels(), input_stream_config.num_frames(), frame_data_.frame, @@ -636,11 +630,9 @@ void CaptureProcessor::CallApmCaptureSide() { // Call the specified capture side API processing method. StreamConfig input_stream_config(frame_data_.input_sample_rate_hz, - frame_data_.input_number_of_channels, - /*has_keyboard=*/false); + frame_data_.input_number_of_channels); StreamConfig output_stream_config(frame_data_.output_sample_rate_hz, - frame_data_.output_number_of_channels, - /*has_keyboard=*/false); + frame_data_.output_number_of_channels); int result = AudioProcessing::kNoError; switch (test_config_->capture_api_function) { case CaptureApiImpl::ProcessStreamImplInteger: @@ -788,14 +780,12 @@ void CaptureProcessor::ApplyRuntimeSettingScheme() { frame_data_.input_number_of_channels); } -const float RenderProcessor::kRenderInputFloatLevel = 0.5f; - RenderProcessor::RenderProcessor(int max_frame_size, RandomGenerator* rand_gen, rtc::Event* render_call_event, rtc::Event* capture_call_event, FrameCounters* shared_counters_state, - TestConfig* test_config, + const TestConfig* test_config, AudioProcessing* apm) : rand_gen_(rand_gen), render_call_event_(render_call_event), @@ -853,8 +843,7 @@ void RenderProcessor::PrepareFrame() { // Prepare the audio data.
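The test changes below all follow the same mechanical migration: `StreamConfig` lost its keyboard flag, so the two-argument constructor is now the entire surface.

// Old (removed): webrtc::StreamConfig config(48000, 2, /*has_keyboard=*/false);
webrtc::StreamConfig config(/*sample_rate_hz=*/48000, /*num_channels=*/2);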
StreamConfig input_stream_config(frame_data_.input_sample_rate_hz, - frame_data_.input_number_of_channels, - /*has_keyboard=*/false); + frame_data_.input_number_of_channels); PopulateAudioFrame(kRenderInputFixLevel, input_stream_config.num_channels(), input_stream_config.num_frames(), frame_data_.frame, @@ -872,11 +861,9 @@ void RenderProcessor::CallApmRenderSide() { // Call the specified render side API processing method. StreamConfig input_stream_config(frame_data_.input_sample_rate_hz, - frame_data_.input_number_of_channels, - /*has_keyboard=*/false); + frame_data_.input_number_of_channels); StreamConfig output_stream_config(frame_data_.output_sample_rate_hz, - frame_data_.output_number_of_channels, - /*has_keyboard=*/false); + frame_data_.output_number_of_channels); int result = AudioProcessing::kNoError; switch (test_config_->render_api_function) { case RenderApiImpl::ProcessReverseStreamImplInteger: @@ -1004,7 +991,7 @@ void RenderProcessor::ApplyRuntimeSettingScheme() { frame_data_.input_number_of_channels); } -} // anonymous namespace +} // namespace TEST_P(AudioProcessingImplLockTest, LockTest) { // Run test and verify that it did not time out. diff --git a/modules/audio_processing/audio_processing_impl_unittest.cc b/modules/audio_processing/audio_processing_impl_unittest.cc index ca8b8b4c25..33d46fb4f8 100644 --- a/modules/audio_processing/audio_processing_impl_unittest.cc +++ b/modules/audio_processing/audio_processing_impl_unittest.cc @@ -14,7 +14,6 @@ #include <memory> #include "api/scoped_refptr.h" -#include "modules/audio_processing/common.h" #include "modules/audio_processing/include/audio_processing.h" #include "modules/audio_processing/optionally_built_submodule_creators.h" #include "modules/audio_processing/test/audio_processing_builder_for_testing.h" @@ -35,11 +34,11 @@ using ::testing::NotNull; class MockInitialize : public AudioProcessingImpl { public: - explicit MockInitialize(const webrtc::Config& config) - : AudioProcessingImpl(config) {} + MockInitialize() : AudioProcessingImpl() {} MOCK_METHOD(void, InitializeLocked, (), (override)); - void RealInitializeLocked() RTC_NO_THREAD_SAFETY_ANALYSIS { + void RealInitializeLocked() { + AssertLockedForTest(); AudioProcessingImpl::InitializeLocked(); } @@ -131,49 +130,48 @@ class TestRenderPreProcessor : public CustomProcessing { } // namespace TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) { - webrtc::Config webrtc_config; - MockInitialize mock(webrtc_config); - ON_CALL(mock, InitializeLocked()) + MockInitialize mock; + ON_CALL(mock, InitializeLocked) .WillByDefault(Invoke(&mock, &MockInitialize::RealInitializeLocked)); - EXPECT_CALL(mock, InitializeLocked()).Times(1); + EXPECT_CALL(mock, InitializeLocked).Times(1); mock.Initialize(); constexpr size_t kMaxSampleRateHz = 32000; constexpr size_t kMaxNumChannels = 2; std::array<int16_t, kMaxNumChannels * kMaxSampleRateHz / 100> frame; frame.fill(0); - StreamConfig config(16000, 1, /*has_keyboard=*/false); + StreamConfig config(16000, 1); // Call with the default parameters; there should be an init. - EXPECT_CALL(mock, InitializeLocked()).Times(0); + EXPECT_CALL(mock, InitializeLocked).Times(0); EXPECT_NOERR(mock.ProcessStream(frame.data(), config, config, frame.data())); EXPECT_NOERR( mock.ProcessReverseStream(frame.data(), config, config, frame.data())); // New sample rate. (Only impacts ProcessStream).
- config = StreamConfig(32000, 1, /*has_keyboard=*/false); - EXPECT_CALL(mock, InitializeLocked()).Times(1); + config = StreamConfig(32000, 1); + EXPECT_CALL(mock, InitializeLocked).Times(1); EXPECT_NOERR(mock.ProcessStream(frame.data(), config, config, frame.data())); // New number of channels. // TODO(peah): Investigate why this causes 2 inits. - config = StreamConfig(32000, 2, /*has_keyboard=*/false); - EXPECT_CALL(mock, InitializeLocked()).Times(2); + config = StreamConfig(32000, 2); + EXPECT_CALL(mock, InitializeLocked).Times(2); EXPECT_NOERR(mock.ProcessStream(frame.data(), config, config, frame.data())); // ProcessStream sets num_channels_ == num_output_channels. EXPECT_NOERR( mock.ProcessReverseStream(frame.data(), config, config, frame.data())); // A new sample rate passed to ProcessReverseStream should cause an init. - config = StreamConfig(16000, 2, /*has_keyboard=*/false); - EXPECT_CALL(mock, InitializeLocked()).Times(1); + config = StreamConfig(16000, 2); + EXPECT_CALL(mock, InitializeLocked).Times(1); EXPECT_NOERR( mock.ProcessReverseStream(frame.data(), config, config, frame.data())); } TEST(AudioProcessingImplTest, UpdateCapturePreGainRuntimeSetting) { - std::unique_ptr<AudioProcessing> apm( - AudioProcessingBuilderForTesting().Create()); + rtc::scoped_refptr<AudioProcessing> apm = + AudioProcessingBuilderForTesting().Create(); webrtc::AudioProcessing::Config apm_config; apm_config.pre_amplifier.enabled = true; apm_config.pre_amplifier.fixed_gain_factor = 1.f; @@ -184,7 +182,7 @@ constexpr size_t kNumChannels = 2; std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame; - StreamConfig config(kSampleRateHz, kNumChannels, /*has_keyboard=*/false); + StreamConfig config(kSampleRateHz, kNumChannels); frame.fill(kAudioLevel); apm->ProcessStream(frame.data(), config, config, frame.data()); EXPECT_EQ(frame[100], kAudioLevel) @@ -205,8 +203,8 @@ TEST(AudioProcessingImplTest, LevelAdjustmentUpdateCapturePreGainRuntimeSetting) { - std::unique_ptr<AudioProcessing> apm( - AudioProcessingBuilderForTesting().Create()); + rtc::scoped_refptr<AudioProcessing> apm = + AudioProcessingBuilderForTesting().Create(); webrtc::AudioProcessing::Config apm_config; apm_config.capture_level_adjustment.enabled = true; apm_config.capture_level_adjustment.pre_gain_factor = 1.f; @@ -217,7 +215,7 @@ constexpr size_t kNumChannels = 2; std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame; - StreamConfig config(kSampleRateHz, kNumChannels, /*has_keyboard=*/false); + StreamConfig config(kSampleRateHz, kNumChannels); frame.fill(kAudioLevel); apm->ProcessStream(frame.data(), config, config, frame.data()); EXPECT_EQ(frame[100], kAudioLevel) @@ -238,8 +236,8 @@ TEST(AudioProcessingImplTest, LevelAdjustmentUpdateCapturePostGainRuntimeSetting) { - std::unique_ptr<AudioProcessing> apm( - AudioProcessingBuilderForTesting().Create()); + rtc::scoped_refptr<AudioProcessing> apm = + AudioProcessingBuilderForTesting().Create(); webrtc::AudioProcessing::Config apm_config; apm_config.capture_level_adjustment.enabled = true; apm_config.capture_level_adjustment.post_gain_factor = 1.f; @@ -250,7 +248,7 @@ constexpr size_t kNumChannels = 2; std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame; - StreamConfig config(kSampleRateHz, kNumChannels, /*has_keyboard=*/false); + StreamConfig config(kSampleRateHz, kNumChannels); frame.fill(kAudioLevel); apm->ProcessStream(frame.data(), config, config, frame.data()); EXPECT_EQ(frame[100], kAudioLevel) @@ -276,16 +274,16 @@ TEST(AudioProcessingImplTest,
EchoControllerObservesSetCaptureUsageChange) { const MockEchoControlFactory* echo_control_factory_ptr = echo_control_factory.get(); - std::unique_ptr<AudioProcessing> apm( + rtc::scoped_refptr<AudioProcessing> apm = AudioProcessingBuilderForTesting() .SetEchoControlFactory(std::move(echo_control_factory)) - .Create()); + .Create(); constexpr int16_t kAudioLevel = 10000; constexpr int kSampleRateHz = 48000; constexpr int kNumChannels = 2; std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame; - StreamConfig config(kSampleRateHz, kNumChannels, /*has_keyboard=*/false); + StreamConfig config(kSampleRateHz, kNumChannels); frame.fill(kAudioLevel); MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext(); @@ -358,10 +356,10 @@ TEST(AudioProcessingImplTest, auto echo_control_factory = std::make_unique<MockEchoControlFactory>(); const auto* echo_control_factory_ptr = echo_control_factory.get(); - std::unique_ptr<AudioProcessing> apm( + rtc::scoped_refptr<AudioProcessing> apm = AudioProcessingBuilderForTesting() .SetEchoControlFactory(std::move(echo_control_factory)) - .Create()); + .Create(); // Disable AGC. webrtc::AudioProcessing::Config apm_config; apm_config.gain_controller1.enabled = false; @@ -374,7 +372,7 @@ constexpr size_t kSampleRateHz = 48000; constexpr size_t kNumChannels = 2; std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame; - StreamConfig config(kSampleRateHz, kNumChannels, /*has_keyboard=*/false); + StreamConfig config(kSampleRateHz, kNumChannels); frame.fill(kAudioLevel); MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext(); @@ -401,10 +399,10 @@ TEST(AudioProcessingImplTest, auto echo_control_factory = std::make_unique<MockEchoControlFactory>(); const auto* echo_control_factory_ptr = echo_control_factory.get(); - std::unique_ptr<AudioProcessing> apm( + rtc::scoped_refptr<AudioProcessing> apm = AudioProcessingBuilderForTesting() .SetEchoControlFactory(std::move(echo_control_factory)) - .Create()); + .Create(); // Disable AGC. webrtc::AudioProcessing::Config apm_config; apm_config.gain_controller1.enabled = false; @@ -417,7 +415,7 @@ constexpr size_t kSampleRateHz = 48000; constexpr size_t kNumChannels = 2; std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame; - StreamConfig config(kSampleRateHz, kNumChannels, /*has_keyboard=*/false); + StreamConfig config(kSampleRateHz, kNumChannels); frame.fill(kAudioLevel); MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext(); @@ -444,10 +442,10 @@ TEST(AudioProcessingImplTest, auto echo_control_factory = std::make_unique<MockEchoControlFactory>(); const auto* echo_control_factory_ptr = echo_control_factory.get(); - std::unique_ptr<AudioProcessing> apm( + rtc::scoped_refptr<AudioProcessing> apm = AudioProcessingBuilderForTesting() .SetEchoControlFactory(std::move(echo_control_factory)) - .Create()); + .Create(); webrtc::AudioProcessing::Config apm_config; // Enable AGC1. apm_config.gain_controller1.enabled = true; @@ -461,8 +459,7 @@ constexpr size_t kSampleRateHz = 48000; constexpr size_t kNumChannels = 2; std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame; - StreamConfig stream_config(kSampleRateHz, kNumChannels, - /*has_keyboard=*/false); + StreamConfig stream_config(kSampleRateHz, kNumChannels); frame.fill(kAudioLevel); MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext(); @@ -490,10 +487,10 @@ TEST(AudioProcessingImplTest, EchoControllerObservesPlayoutVolumeChange) { auto echo_control_factory = std::make_unique<MockEchoControlFactory>(); const auto* echo_control_factory_ptr = echo_control_factory.get(); - std::unique_ptr<AudioProcessing> apm( + rtc::scoped_refptr<AudioProcessing> apm = AudioProcessingBuilderForTesting() .SetEchoControlFactory(std::move(echo_control_factory)) - .Create()); + .Create(); // Disable AGC.
webrtc::AudioProcessing::Config apm_config; apm_config.gain_controller1.enabled = false; @@ -504,8 +501,7 @@ constexpr size_t kSampleRateHz = 48000; constexpr size_t kNumChannels = 2; std::array<int16_t, kNumChannels * kSampleRateHz / 100> frame; - StreamConfig stream_config(kSampleRateHz, kNumChannels, - /*has_keyboard=*/false); + StreamConfig stream_config(kSampleRateHz, kNumChannels); frame.fill(kAudioLevel); MockEchoControl* echo_control_mock = echo_control_factory_ptr->GetNext(); @@ -548,14 +544,13 @@ TEST(AudioProcessingImplTest, RenderPreProcessorBeforeEchoDetector) { std::unique_ptr<CustomProcessing> test_render_pre_processor( new TestRenderPreProcessor()); // Create APM injecting the test echo detector and render pre-processor. - std::unique_ptr<AudioProcessing> apm( + rtc::scoped_refptr<AudioProcessing> apm = AudioProcessingBuilderForTesting() .SetEchoDetector(test_echo_detector) .SetRenderPreProcessing(std::move(test_render_pre_processor)) - .Create()); + .Create(); webrtc::AudioProcessing::Config apm_config; apm_config.pre_amplifier.enabled = true; - apm_config.residual_echo_detector.enabled = true; apm->ApplyConfig(apm_config); constexpr int16_t kAudioLevel = 1000; @@ -563,16 +558,15 @@ constexpr size_t kNumChannels = 1; // Explicitly initialize APM to ensure no render frames are discarded. const ProcessingConfig processing_config = {{ - {kSampleRateHz, kNumChannels, /*has_keyboard=*/false}, - {kSampleRateHz, kNumChannels, /*has_keyboard=*/false}, - {kSampleRateHz, kNumChannels, /*has_keyboard=*/false}, - {kSampleRateHz, kNumChannels, /*has_keyboard=*/false}, + {kSampleRateHz, kNumChannels}, + {kSampleRateHz, kNumChannels}, + {kSampleRateHz, kNumChannels}, + {kSampleRateHz, kNumChannels}, }}; apm->Initialize(processing_config); std::array<float, kNumChannels * kSampleRateHz / 100> frame; - StreamConfig stream_config(kSampleRateHz, kNumChannels, - /*has_keyboard=*/false); + StreamConfig stream_config(kSampleRateHz, kNumChannels); constexpr float kAudioLevelFloat = static_cast<float>(kAudioLevel); constexpr float kExpectedPreprocessedAudioLevel = @@ -604,7 +598,7 @@ TEST(AudioProcessingImplTest, RenderPreProcessorBeforeEchoDetector) { // config should be bit-exact with running APM with said submodules disabled. // This mainly tests that SetCreateOptionalSubmodulesForTesting has an effect. TEST(ApmWithSubmodulesExcludedTest, BitexactWithDisabledModules) { - auto apm = rtc::make_ref_counted<AudioProcessingImpl>(webrtc::Config()); + auto apm = rtc::make_ref_counted<AudioProcessingImpl>(); ASSERT_EQ(apm->Initialize(), AudioProcessing::kNoError); ApmSubmoduleCreationOverrides overrides; @@ -628,8 +622,7 @@ float* channel_pointers[] = {buffer.data()}; float* channel_pointers_reference[] = {buffer_reference.data()}; StreamConfig stream_config(/*sample_rate_hz=*/kSampleRateHz, - /*num_channels=*/kNumChannels, - /*has_keyboard=*/false); + /*num_channels=*/kNumChannels); Random random_generator(2341U); constexpr int kFramesToProcessPerConfiguration = 10; @@ -652,7 +645,7 @@ // Disable transient suppressor creation and run APM in ways that should trigger // calls to the transient suppressor API.
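These `ApmWithSubmodulesExcludedTest` cases use the build-override hook to skip creating optional submodules. A sketch of the mechanism; the override method name on `AudioProcessingImpl` is elided by the hunks above, so treat it as assumed:

// Veto creation of the transient suppressor for this APM instance.
webrtc::ApmSubmoduleCreationOverrides overrides;
overrides.transient_suppression = true;  // true == do not create it.
apm->OverrideSubmoduleCreationForTesting(overrides);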
TEST(ApmWithSubmodulesExcludedTest, ReinitializeTransientSuppressor) { - auto apm = rtc::make_ref_counted(webrtc::Config()); + auto apm = rtc::make_ref_counted(); ASSERT_EQ(apm->Initialize(), kNoErr); ApmSubmoduleCreationOverrides overrides; @@ -669,8 +662,7 @@ TEST(ApmWithSubmodulesExcludedTest, ReinitializeTransientSuppressor) { constexpr int kFramesToProcessPerConfiguration = 3; StreamConfig initial_stream_config(/*sample_rate_hz=*/16000, - /*num_channels=*/1, - /*has_keyboard=*/false); + /*num_channels=*/1); for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) { RandomizeSampleVector(&random_generator, buffer); EXPECT_EQ(apm->ProcessStream(channel_pointers, initial_stream_config, @@ -679,8 +671,7 @@ TEST(ApmWithSubmodulesExcludedTest, ReinitializeTransientSuppressor) { } StreamConfig stereo_stream_config(/*sample_rate_hz=*/16000, - /*num_channels=*/2, - /*has_keyboard=*/false); + /*num_channels=*/2); for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) { RandomizeSampleVector(&random_generator, buffer); EXPECT_EQ(apm->ProcessStream(channel_pointers, stereo_stream_config, @@ -689,8 +680,7 @@ TEST(ApmWithSubmodulesExcludedTest, ReinitializeTransientSuppressor) { } StreamConfig high_sample_rate_stream_config(/*sample_rate_hz=*/48000, - /*num_channels=*/1, - /*has_keyboard=*/false); + /*num_channels=*/2); for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) { RandomizeSampleVector(&random_generator, buffer); EXPECT_EQ( @@ -698,22 +688,12 @@ TEST(ApmWithSubmodulesExcludedTest, ReinitializeTransientSuppressor) { high_sample_rate_stream_config, channel_pointers), kNoErr); } - - StreamConfig keyboard_stream_config(/*sample_rate_hz=*/16000, - /*num_channels=*/1, - /*has_keyboard=*/true); - for (int i = 0; i < kFramesToProcessPerConfiguration; ++i) { - RandomizeSampleVector(&random_generator, buffer); - EXPECT_EQ(apm->ProcessStream(channel_pointers, keyboard_stream_config, - keyboard_stream_config, channel_pointers), - kNoErr); - } } // Disable transient suppressor creation and run APM in ways that should trigger // calls to the transient suppressor API. 
TEST(ApmWithSubmodulesExcludedTest, ToggleTransientSuppressor) { - auto apm = rtc::make_ref_counted(webrtc::Config()); + auto apm = rtc::make_ref_counted(); ASSERT_EQ(apm->Initialize(), AudioProcessing::kNoError); ApmSubmoduleCreationOverrides overrides; @@ -726,8 +706,7 @@ TEST(ApmWithSubmodulesExcludedTest, ToggleTransientSuppressor) { Random random_generator(2341U); constexpr int kFramesToProcessPerConfiguration = 3; StreamConfig stream_config(/*sample_rate_hz=*/16000, - /*num_channels=*/1, - /*has_keyboard=*/false); + /*num_channels=*/1); AudioProcessing::Config config = apm->GetConfig(); config.transient_suppression.enabled = true; diff --git a/modules/audio_processing/audio_processing_performance_unittest.cc b/modules/audio_processing/audio_processing_performance_unittest.cc index 9585850296..57655aea6d 100644 --- a/modules/audio_processing/audio_processing_performance_unittest.cc +++ b/modules/audio_processing/audio_processing_performance_unittest.cc @@ -343,7 +343,6 @@ class TimedThreadApiProcessor { frame_data_.input_stream_config.set_sample_rate_hz( simulation_config_->sample_rate_hz); frame_data_.input_stream_config.set_num_channels(num_channels_); - frame_data_.input_stream_config.set_has_keyboard(false); populate_audio_frame(input_level_, num_channels_, (simulation_config_->sample_rate_hz * AudioProcessing::kChunkSizeMs / 1000), @@ -353,7 +352,6 @@ class TimedThreadApiProcessor { frame_data_.output_stream_config.set_sample_rate_hz( simulation_config_->sample_rate_hz); frame_data_.output_stream_config.set_num_channels(1); - frame_data_.output_stream_config.set_has_keyboard(false); } bool ReadyToProcess() { @@ -367,7 +365,7 @@ class TimedThreadApiProcessor { // Should not be reached, but the return statement is needed for the code to // build successfully on Android. 
- RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return false; } @@ -443,8 +441,6 @@ class CallSimulator : public ::testing::TestWithParam { apm_config.gain_controller1.enabled = true; apm_config.gain_controller1.mode = AudioProcessing::Config::GainController1::kAdaptiveDigital; - apm_config.level_estimation.enabled = true; - apm_config.voice_detection.enabled = true; apm->ApplyConfig(apm_config); }; @@ -456,8 +452,6 @@ class CallSimulator : public ::testing::TestWithParam { apm_config.noise_suppression.enabled = true; apm_config.gain_controller1.mode = AudioProcessing::Config::GainController1::kAdaptiveDigital; - apm_config.level_estimation.enabled = true; - apm_config.voice_detection.enabled = true; apm->ApplyConfig(apm_config); }; @@ -467,43 +461,38 @@ class CallSimulator : public ::testing::TestWithParam { AudioProcessing::Config apm_config = apm->GetConfig(); apm_config.echo_canceller.enabled = false; apm_config.gain_controller1.enabled = false; - apm_config.level_estimation.enabled = false; apm_config.noise_suppression.enabled = false; - apm_config.voice_detection.enabled = false; apm->ApplyConfig(apm_config); }; int num_capture_channels = 1; switch (simulation_config_.simulation_settings) { case SettingsType::kDefaultApmMobile: { - apm_.reset(AudioProcessingBuilderForTesting().Create()); + apm_ = AudioProcessingBuilderForTesting().Create(); ASSERT_TRUE(!!apm_); set_default_mobile_apm_runtime_settings(apm_.get()); break; } case SettingsType::kDefaultApmDesktop: { - Config config; - apm_.reset(AudioProcessingBuilderForTesting().Create(config)); + apm_ = AudioProcessingBuilderForTesting().Create(); ASSERT_TRUE(!!apm_); set_default_desktop_apm_runtime_settings(apm_.get()); break; } case SettingsType::kAllSubmodulesTurnedOff: { - apm_.reset(AudioProcessingBuilderForTesting().Create()); + apm_ = AudioProcessingBuilderForTesting().Create(); ASSERT_TRUE(!!apm_); turn_off_default_apm_runtime_settings(apm_.get()); break; } case SettingsType::kDefaultApmDesktopWithoutDelayAgnostic: { - Config config; - apm_.reset(AudioProcessingBuilderForTesting().Create(config)); + apm_ = AudioProcessingBuilderForTesting().Create(); ASSERT_TRUE(!!apm_); set_default_desktop_apm_runtime_settings(apm_.get()); break; } case SettingsType::kDefaultApmDesktopWithoutExtendedFilter: { - Config config; - apm_.reset(AudioProcessingBuilderForTesting().Create(config)); + apm_ = AudioProcessingBuilderForTesting().Create(); ASSERT_TRUE(!!apm_); set_default_desktop_apm_runtime_settings(apm_.get()); break; @@ -544,7 +533,7 @@ class CallSimulator : public ::testing::TestWithParam { // Thread related variables. 
Random rand_gen_; - std::unique_ptr apm_; + rtc::scoped_refptr apm_; const SimulationConfig simulation_config_; FrameCounters frame_counters_; LockedFlag capture_call_checker_; diff --git a/modules/audio_processing/audio_processing_unittest.cc b/modules/audio_processing/audio_processing_unittest.cc index 4d30a348f6..b21a0227c5 100644 --- a/modules/audio_processing/audio_processing_unittest.cc +++ b/modules/audio_processing/audio_processing_unittest.cc @@ -20,13 +20,13 @@ #include #include "absl/flags/flag.h" +#include "api/audio/echo_detector_creator.h" #include "common_audio/include/audio_util.h" #include "common_audio/resampler/include/push_resampler.h" #include "common_audio/resampler/push_sinc_resampler.h" #include "common_audio/signal_processing/include/signal_processing_library.h" #include "modules/audio_processing/aec_dump/aec_dump_factory.h" #include "modules/audio_processing/audio_processing_impl.h" -#include "modules/audio_processing/common.h" #include "modules/audio_processing/include/mock_audio_processing.h" #include "modules/audio_processing/test/audio_processing_builder_for_testing.h" #include "modules/audio_processing/test/protobuf_utils.h" @@ -94,21 +94,6 @@ void ConvertToFloat(const Int16FrameData& frame, ChannelBuffer* cb) { ConvertToFloat(frame.data.data(), cb); } -// Number of channels including the keyboard channel. -size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) { - switch (layout) { - case AudioProcessing::kMono: - return 1; - case AudioProcessing::kMonoAndKeyboard: - case AudioProcessing::kStereo: - return 2; - case AudioProcessing::kStereoAndKeyboard: - return 3; - } - RTC_NOTREACHED(); - return 0; -} - void MixStereoToMono(const float* stereo, float* mono, size_t samples_per_channel) { @@ -200,15 +185,11 @@ void EnableAllAPComponents(AudioProcessing* ap) { apm_config.gain_controller1.enabled = true; apm_config.gain_controller1.mode = AudioProcessing::Config::GainController1::kAdaptiveAnalog; - apm_config.gain_controller1.analog_level_minimum = 0; - apm_config.gain_controller1.analog_level_maximum = 255; #endif apm_config.noise_suppression.enabled = true; apm_config.high_pass_filter.enabled = true; - apm_config.level_estimation.enabled = true; - apm_config.voice_detection.enabled = true; apm_config.pipeline.maximum_internal_processing_rate = 48000; ap->ApplyConfig(apm_config); } @@ -275,7 +256,7 @@ std::string OutputFilePath(const std::string& name, } else if (num_output_channels == 2) { ss << "stereo"; } else { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } ss << output_rate / 1000; if (num_reverse_output_channels == 1) { @@ -283,7 +264,7 @@ std::string OutputFilePath(const std::string& name, } else if (num_reverse_output_channels == 2) { ss << "_rstereo"; } else { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } ss << reverse_output_rate / 1000; ss << "_d" << file_direction << "_pcm"; @@ -321,10 +302,10 @@ void OpenFileAndReadMessage(const std::string& filename, MessageLite* msg) { // Reads a 10 ms chunk of int16 interleaved audio from the given (assumed // stereo) file, converts to deinterleaved float (optionally downmixing) and -// returns the result in |cb|. Returns false if the file ended (or on error) and +// returns the result in `cb`. Returns false if the file ended (or on error) and // true otherwise. // -// |int_data| and |float_data| are just temporary space that must be +// `int_data` and `float_data` are just temporary space that must be // sufficiently large to hold the 10 ms chunk. 
bool ReadChunk(FILE* file, int16_t* int_data, @@ -415,11 +396,11 @@ class ApmTest : public ::testing::Test { const std::string output_path_; const std::string ref_filename_; - std::unique_ptr apm_; + rtc::scoped_refptr apm_; Int16FrameData frame_; Int16FrameData revframe_; - std::unique_ptr > float_cb_; - std::unique_ptr > revfloat_cb_; + std::unique_ptr> float_cb_; + std::unique_ptr> revfloat_cb_; int output_sample_rate_hz_; size_t num_output_channels_; FILE* far_file_; @@ -435,7 +416,7 @@ ApmTest::ApmTest() far_file_(NULL), near_file_(NULL), out_file_(NULL) { - apm_.reset(AudioProcessingBuilderForTesting().Create()); + apm_ = AudioProcessingBuilderForTesting().Create(); AudioProcessing::Config apm_config = apm_->GetConfig(); apm_config.gain_controller1.analog_gain_controller.enabled = false; apm_config.pipeline.maximum_internal_processing_rate = 48000; @@ -596,7 +577,7 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms, int delay_min, int delay_max) { - // The |revframe_| and |frame_| should include the proper frame information, + // The `revframe_` and `frame_` should include the proper frame information, // hence can be used for extracting information. Int16FrameData tmp_frame; std::queue frame_queue; @@ -606,7 +587,7 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms, SetFrameTo(&tmp_frame, 0); EXPECT_EQ(apm_->kNoError, apm_->Initialize()); - // Initialize the |frame_queue| with empty frames. + // Initialize the `frame_queue` with empty frames. int frame_delay = delay_ms / 10; while (frame_delay < 0) { Int16FrameData* frame = new Int16FrameData(); @@ -1080,18 +1061,15 @@ TEST_F(ApmTest, GainControl) { EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat)); } - // Testing level limits - std::array kMinLevels = {0, 0, 255, 65000}; - std::array kMaxLevels = {255, 1024, 65535, 65535}; - for (size_t i = 0; i < kMinLevels.size(); ++i) { - int min_level = kMinLevels[i]; - int max_level = kMaxLevels[i]; - config.gain_controller1.analog_level_minimum = min_level; - config.gain_controller1.analog_level_maximum = max_level; - apm_->ApplyConfig(config); - apm_->set_stream_analog_level((min_level + max_level) / 2); - EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat)); - } + // Testing level limits. 
+ constexpr int kMinLevel = 0; + constexpr int kMaxLevel = 255; + apm_->set_stream_analog_level(kMinLevel); + EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat)); + apm_->set_stream_analog_level((kMinLevel + kMaxLevel) / 2); + EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat)); + apm_->set_stream_analog_level(kMaxLevel); + EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(kFloatFormat)); } #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) @@ -1125,44 +1103,18 @@ TEST_F(ApmDeathTest, GainControlDiesOnTooHighCompressionGainDb) { EXPECT_DEATH(apm_->ApplyConfig(config), ""); } -TEST_F(ApmDeathTest, GainControlDiesOnTooLowAnalogLevelLowerLimit) { - auto config = apm_->GetConfig(); - config.gain_controller1.enabled = true; - config.gain_controller1.analog_level_minimum = -1; - EXPECT_DEATH(apm_->ApplyConfig(config), ""); -} - -TEST_F(ApmDeathTest, GainControlDiesOnTooHighAnalogLevelUpperLimit) { - auto config = apm_->GetConfig(); - config.gain_controller1.enabled = true; - config.gain_controller1.analog_level_maximum = 65536; - EXPECT_DEATH(apm_->ApplyConfig(config), ""); -} - -TEST_F(ApmDeathTest, GainControlDiesOnInvertedAnalogLevelLimits) { - auto config = apm_->GetConfig(); - config.gain_controller1.enabled = true; - config.gain_controller1.analog_level_minimum = 512; - config.gain_controller1.analog_level_maximum = 255; - EXPECT_DEATH(apm_->ApplyConfig(config), ""); -} - TEST_F(ApmDeathTest, ApmDiesOnTooLowAnalogLevel) { auto config = apm_->GetConfig(); config.gain_controller1.enabled = true; - config.gain_controller1.analog_level_minimum = 255; - config.gain_controller1.analog_level_maximum = 512; apm_->ApplyConfig(config); - EXPECT_DEATH(apm_->set_stream_analog_level(254), ""); + EXPECT_DEATH(apm_->set_stream_analog_level(-1), ""); } TEST_F(ApmDeathTest, ApmDiesOnTooHighAnalogLevel) { auto config = apm_->GetConfig(); config.gain_controller1.enabled = true; - config.gain_controller1.analog_level_minimum = 255; - config.gain_controller1.analog_level_maximum = 512; apm_->ApplyConfig(config); - EXPECT_DEATH(apm_->set_stream_analog_level(513), ""); + EXPECT_DEATH(apm_->set_stream_analog_level(256), ""); } #endif @@ -1272,9 +1224,7 @@ TEST_F(ApmTest, AllProcessingDisabledByDefault) { EXPECT_FALSE(config.echo_canceller.enabled); EXPECT_FALSE(config.high_pass_filter.enabled); EXPECT_FALSE(config.gain_controller1.enabled); - EXPECT_FALSE(config.level_estimation.enabled); EXPECT_FALSE(config.noise_suppression.enabled); - EXPECT_FALSE(config.voice_detection.enabled); } TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) { @@ -1312,7 +1262,7 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) { auto src_channels = &src[0]; auto dest_channels = &dest[0]; - apm_.reset(AudioProcessingBuilderForTesting().Create()); + apm_ = AudioProcessingBuilderForTesting().Create(); EXPECT_NOERR(apm_->ProcessStream(&src_channels, StreamConfig(sample_rate, 1), StreamConfig(sample_rate, 1), &dest_channels)); @@ -1399,7 +1349,6 @@ TEST_F(ApmTest, SplittingFilter) { auto apm_config = apm_->GetConfig(); SetFrameTo(&frame_, 1000); frame_copy.CopyFrom(frame_); - apm_config.level_estimation.enabled = true; apm_->ApplyConfig(apm_config); EXPECT_EQ(apm_->kNoError, apm_->ProcessStream( @@ -1414,51 +1363,6 @@ TEST_F(ApmTest, SplittingFilter) { StreamConfig(frame_.sample_rate_hz, frame_.num_channels), frame_.data.data())); EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy)); - apm_config.level_estimation.enabled = false; - apm_->ApplyConfig(apm_config); - - // 3. 
Only GetStatistics-reporting VAD is enabled... - SetFrameTo(&frame_, 1000); - frame_copy.CopyFrom(frame_); - apm_config.voice_detection.enabled = true; - apm_->ApplyConfig(apm_config); - EXPECT_EQ(apm_->kNoError, - apm_->ProcessStream( - frame_.data.data(), - StreamConfig(frame_.sample_rate_hz, frame_.num_channels), - StreamConfig(frame_.sample_rate_hz, frame_.num_channels), - frame_.data.data())); - EXPECT_EQ(apm_->kNoError, - apm_->ProcessStream( - frame_.data.data(), - StreamConfig(frame_.sample_rate_hz, frame_.num_channels), - StreamConfig(frame_.sample_rate_hz, frame_.num_channels), - frame_.data.data())); - EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy)); - apm_config.voice_detection.enabled = false; - apm_->ApplyConfig(apm_config); - - // 4. Both the VAD and the level estimator are enabled... - SetFrameTo(&frame_, 1000); - frame_copy.CopyFrom(frame_); - apm_config.voice_detection.enabled = true; - apm_config.level_estimation.enabled = true; - apm_->ApplyConfig(apm_config); - EXPECT_EQ(apm_->kNoError, - apm_->ProcessStream( - frame_.data.data(), - StreamConfig(frame_.sample_rate_hz, frame_.num_channels), - StreamConfig(frame_.sample_rate_hz, frame_.num_channels), - frame_.data.data())); - EXPECT_EQ(apm_->kNoError, - apm_->ProcessStream( - frame_.data.data(), - StreamConfig(frame_.sample_rate_hz, frame_.num_channels), - StreamConfig(frame_.sample_rate_hz, frame_.num_channels), - frame_.data.data())); - EXPECT_TRUE(FrameDataAreEqual(frame_, frame_copy)); - apm_config.voice_detection.enabled = false; - apm_config.level_estimation.enabled = false; apm_->ApplyConfig(apm_config); // Check the test is valid. We should have distortion from the filter @@ -1773,7 +1677,9 @@ TEST_F(ApmTest, Process) { if (test->num_input_channels() != test->num_output_channels()) continue; - apm_.reset(AudioProcessingBuilderForTesting().Create()); + apm_ = AudioProcessingBuilderForTesting() + .SetEchoDetector(CreateEchoDetector()) + .Create(); AudioProcessing::Config apm_config = apm_->GetConfig(); apm_config.gain_controller1.analog_gain_controller.enabled = false; apm_->ApplyConfig(apm_config); @@ -1786,11 +1692,9 @@ TEST_F(ApmTest, Process) { static_cast(test->num_reverse_channels()), true); int frame_count = 0; - int has_voice_count = 0; int analog_level = 127; int analog_level_average = 0; int max_output_average = 0; - float rms_dbfs_average = 0.0f; #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) int stats_index = 0; #endif @@ -1823,10 +1727,6 @@ TEST_F(ApmTest, Process) { analog_level = apm_->recommended_stream_analog_level(); analog_level_average += analog_level; AudioProcessingStats stats = apm_->GetStatistics(); - EXPECT_TRUE(stats.voice_detected); - EXPECT_TRUE(stats.output_rms_dbfs); - has_voice_count += *stats.voice_detected ? 1 : 0; - rms_dbfs_average += *stats.output_rms_dbfs; size_t frame_size = frame_.samples_per_channel * frame_.num_channels; size_t write_count = @@ -1841,16 +1741,16 @@ TEST_F(ApmTest, Process) { const int kStatsAggregationFrameNum = 100; // 1 second. if (frame_count % kStatsAggregationFrameNum == 0) { // Get echo and delay metrics. - AudioProcessingStats stats = apm_->GetStatistics(); + AudioProcessingStats stats2 = apm_->GetStatistics(); // Echo metrics. 
- const float echo_return_loss = stats.echo_return_loss.value_or(-1.0f); + const float echo_return_loss = stats2.echo_return_loss.value_or(-1.0f); const float echo_return_loss_enhancement = - stats.echo_return_loss_enhancement.value_or(-1.0f); + stats2.echo_return_loss_enhancement.value_or(-1.0f); const float residual_echo_likelihood = - stats.residual_echo_likelihood.value_or(-1.0f); + stats2.residual_echo_likelihood.value_or(-1.0f); const float residual_echo_likelihood_recent_max = - stats.residual_echo_likelihood_recent_max.value_or(-1.0f); + stats2.residual_echo_likelihood_recent_max.value_or(-1.0f); if (!absl::GetFlag(FLAGS_write_apm_ref_data)) { const audioproc::Test::EchoMetrics& reference = @@ -1879,47 +1779,28 @@ TEST_F(ApmTest, Process) { } max_output_average /= frame_count; analog_level_average /= frame_count; - rms_dbfs_average /= frame_count; if (!absl::GetFlag(FLAGS_write_apm_ref_data)) { const int kIntNear = 1; - // When running the test on a N7 we get a {2, 6} difference of - // |has_voice_count| and |max_output_average| is up to 18 higher. - // All numbers being consistently higher on N7 compare to ref_data. + // All numbers being consistently higher on N7 compare to the reference + // data. // TODO(bjornv): If we start getting more of these offsets on Android we // should consider a different approach. Either using one slack for all, // or generate a separate android reference. #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) - const int kHasVoiceCountOffset = 3; - const int kHasVoiceCountNear = 8; const int kMaxOutputAverageOffset = 9; const int kMaxOutputAverageNear = 26; #else - const int kHasVoiceCountOffset = 0; - const int kHasVoiceCountNear = kIntNear; const int kMaxOutputAverageOffset = 0; const int kMaxOutputAverageNear = kIntNear; #endif - EXPECT_NEAR(test->has_voice_count(), - has_voice_count - kHasVoiceCountOffset, kHasVoiceCountNear); - EXPECT_NEAR(test->analog_level_average(), analog_level_average, kIntNear); EXPECT_NEAR(test->max_output_average(), max_output_average - kMaxOutputAverageOffset, kMaxOutputAverageNear); -#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) - const double kFloatNear = 0.002; - EXPECT_NEAR(test->rms_dbfs_average(), rms_dbfs_average, kFloatNear); -#endif } else { - test->set_has_voice_count(has_voice_count); - test->set_analog_level_average(analog_level_average); test->set_max_output_average(max_output_average); - -#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) - test->set_rms_dbfs_average(rms_dbfs_average); -#endif } rewind(far_file_); @@ -1931,44 +1812,6 @@ TEST_F(ApmTest, Process) { } } -TEST_F(ApmTest, NoErrorsWithKeyboardChannel) { - struct ChannelFormat { - AudioProcessing::ChannelLayout in_layout; - AudioProcessing::ChannelLayout out_layout; - }; - ChannelFormat cf[] = { - {AudioProcessing::kMonoAndKeyboard, AudioProcessing::kMono}, - {AudioProcessing::kStereoAndKeyboard, AudioProcessing::kMono}, - {AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo}, - }; - - std::unique_ptr ap( - AudioProcessingBuilderForTesting().Create()); - // Enable one component just to ensure some processing takes place. 
- AudioProcessing::Config config; - config.noise_suppression.enabled = true; - ap->ApplyConfig(config); - for (size_t i = 0; i < arraysize(cf); ++i) { - const int in_rate = 44100; - const int out_rate = 48000; - ChannelBuffer in_cb(SamplesFromRate(in_rate), - TotalChannelsFromLayout(cf[i].in_layout)); - ChannelBuffer out_cb(SamplesFromRate(out_rate), - ChannelsFromLayout(cf[i].out_layout)); - bool has_keyboard = cf[i].in_layout == AudioProcessing::kMonoAndKeyboard || - cf[i].in_layout == AudioProcessing::kStereoAndKeyboard; - StreamConfig in_sc(in_rate, ChannelsFromLayout(cf[i].in_layout), - has_keyboard); - StreamConfig out_sc(out_rate, ChannelsFromLayout(cf[i].out_layout)); - - // Run over a few chunks. - for (int j = 0; j < 10; ++j) { - EXPECT_NOERR(ap->ProcessStream(in_cb.channels(), in_sc, out_sc, - out_cb.channels())); - } - } -} - // Compares the reference and test arrays over a region around the expected // delay. Finds the highest SNR in that region and adds the variance and squared // error results to the supplied accumulators. @@ -2058,7 +1901,7 @@ class AudioProcessingTest static void TearDownTestSuite() { ClearTempFiles(); } // Runs a process pass on files with the given parameters and dumps the output - // to a file specified with |output_file_prefix|. Both forward and reverse + // to a file specified with `output_file_prefix`. Both forward and reverse // output streams are dumped. static void ProcessFormat(int input_rate, int output_rate, @@ -2069,8 +1912,8 @@ class AudioProcessingTest size_t num_reverse_input_channels, size_t num_reverse_output_channels, const std::string& output_file_prefix) { - std::unique_ptr ap( - AudioProcessingBuilderForTesting().Create()); + rtc::scoped_refptr ap = + AudioProcessingBuilderForTesting().Create(); AudioProcessing::Config apm_config = ap->GetConfig(); apm_config.gain_controller1.analog_gain_controller.enabled = false; ap->ApplyConfig(apm_config); @@ -2277,7 +2120,7 @@ TEST_P(AudioProcessingTest, Formats) { out_ptr = cmp_data.get(); } - // Update the |sq_error| and |variance| accumulators with the highest + // Update the `sq_error` and `variance` accumulators with the highest // SNR of reference vs output. UpdateBestSNR(ref_data.get(), out_ptr, ref_length, expected_delay, &variance, &sq_error); @@ -2454,8 +2297,8 @@ void RunApmRateAndChannelTest( rtc::ArrayView sample_rates_hz, rtc::ArrayView render_channel_counts, rtc::ArrayView capture_channel_counts) { - std::unique_ptr apm( - AudioProcessingBuilderForTesting().Create()); + rtc::scoped_refptr apm = + AudioProcessingBuilderForTesting().Create(); webrtc::AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = true; apm->ApplyConfig(apm_config); @@ -2489,7 +2332,6 @@ void RunApmRateAndChannelTest( std::vector* frame_data) { cfg->set_sample_rate_hz(sample_rate_hz); cfg->set_num_channels(num_channels); - cfg->set_has_keyboard(false); size_t max_frame_size = ceil(sample_rate_hz / 100.f); channels_data->resize(num_channels * max_frame_size); @@ -2673,14 +2515,13 @@ class MyEchoControlFactory : public EchoControlFactory { TEST(ApmConfiguration, EchoControlInjection) { // Verify that apm uses an injected echo controller if one is provided. 
- webrtc::Config webrtc_config; std::unique_ptr echo_control_factory( new MyEchoControlFactory()); rtc::scoped_refptr apm = AudioProcessingBuilderForTesting() .SetEchoControlFactory(std::move(echo_control_factory)) - .Create(webrtc_config); + .Create(); Int16FrameData audio; audio.num_channels = 1; @@ -2699,10 +2540,75 @@ TEST(ApmConfiguration, EchoControlInjection) { audio.data.data()); } -std::unique_ptr CreateApm(bool mobile_aec) { - Config old_config; - std::unique_ptr apm( - AudioProcessingBuilderForTesting().Create(old_config)); +TEST(ApmConfiguration, EchoDetectorInjection) { + using ::testing::_; + rtc::scoped_refptr mock_echo_detector = + rtc::make_ref_counted<::testing::StrictMock>(); + EXPECT_CALL(*mock_echo_detector, + Initialize(/*capture_sample_rate_hz=*/16000, _, + /*render_sample_rate_hz=*/16000, _)) + .Times(1); + rtc::scoped_refptr apm = + AudioProcessingBuilderForTesting() + .SetEchoDetector(mock_echo_detector) + .Create(); + + // The echo detector is included in processing when enabled. + EXPECT_CALL(*mock_echo_detector, AnalyzeRenderAudio(_)) + .WillOnce([](rtc::ArrayView render_audio) { + EXPECT_EQ(render_audio.size(), 160u); + }); + EXPECT_CALL(*mock_echo_detector, AnalyzeCaptureAudio(_)) + .WillOnce([](rtc::ArrayView capture_audio) { + EXPECT_EQ(capture_audio.size(), 160u); + }); + EXPECT_CALL(*mock_echo_detector, GetMetrics()).Times(1); + + Int16FrameData frame; + frame.num_channels = 1; + SetFrameSampleRate(&frame, 16000); + + apm->ProcessReverseStream(frame.data.data(), StreamConfig(16000, 1), + StreamConfig(16000, 1), frame.data.data()); + apm->ProcessStream(frame.data.data(), StreamConfig(16000, 1), + StreamConfig(16000, 1), frame.data.data()); + + // When processing rates change, the echo detector is also reinitialized to + // match those. + EXPECT_CALL(*mock_echo_detector, + Initialize(/*capture_sample_rate_hz=*/48000, _, + /*render_sample_rate_hz=*/16000, _)) + .Times(1); + EXPECT_CALL(*mock_echo_detector, + Initialize(/*capture_sample_rate_hz=*/48000, _, + /*render_sample_rate_hz=*/48000, _)) + .Times(1); + EXPECT_CALL(*mock_echo_detector, AnalyzeRenderAudio(_)) + .WillOnce([](rtc::ArrayView render_audio) { + EXPECT_EQ(render_audio.size(), 480u); + }); + EXPECT_CALL(*mock_echo_detector, AnalyzeCaptureAudio(_)) + .Times(2) + .WillRepeatedly([](rtc::ArrayView capture_audio) { + EXPECT_EQ(capture_audio.size(), 480u); + }); + EXPECT_CALL(*mock_echo_detector, GetMetrics()).Times(2); + + SetFrameSampleRate(&frame, 48000); + apm->ProcessStream(frame.data.data(), StreamConfig(48000, 1), + StreamConfig(48000, 1), frame.data.data()); + apm->ProcessReverseStream(frame.data.data(), StreamConfig(48000, 1), + StreamConfig(48000, 1), frame.data.data()); + apm->ProcessStream(frame.data.data(), StreamConfig(48000, 1), + StreamConfig(48000, 1), frame.data.data()); +} + +rtc::scoped_refptr CreateApm(bool mobile_aec) { + // Enable residual echo detection, for stats. + rtc::scoped_refptr apm = + AudioProcessingBuilderForTesting() + .SetEchoDetector(CreateEchoDetector()) + .Create(); if (!apm) { return apm; } @@ -2714,17 +2620,14 @@ std::unique_ptr CreateApm(bool mobile_aec) { return nullptr; } - // Disable all components except for an AEC and the residual echo detector. + // Disable all components except for an AEC. 
AudioProcessing::Config apm_config; - apm_config.residual_echo_detector.enabled = true; apm_config.high_pass_filter.enabled = false; apm_config.gain_controller1.enabled = false; apm_config.gain_controller2.enabled = false; apm_config.echo_canceller.enabled = true; apm_config.echo_canceller.mobile_mode = mobile_aec; apm_config.noise_suppression.enabled = false; - apm_config.level_estimation.enabled = false; - apm_config.voice_detection.enabled = false; apm->ApplyConfig(apm_config); return apm; } @@ -2737,7 +2640,7 @@ std::unique_ptr CreateApm(bool mobile_aec) { TEST(MAYBE_ApmStatistics, AECEnabledTest) { // Set up APM with AEC3 and process some audio. - std::unique_ptr apm = CreateApm(false); + rtc::scoped_refptr apm = CreateApm(false); ASSERT_TRUE(apm); AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = true; @@ -2774,21 +2677,21 @@ TEST(MAYBE_ApmStatistics, AECEnabledTest) { // Test statistics interface. AudioProcessingStats stats = apm->GetStatistics(); // We expect all statistics to be set and have a sensible value. - ASSERT_TRUE(stats.residual_echo_likelihood); + ASSERT_TRUE(stats.residual_echo_likelihood.has_value()); EXPECT_GE(*stats.residual_echo_likelihood, 0.0); EXPECT_LE(*stats.residual_echo_likelihood, 1.0); - ASSERT_TRUE(stats.residual_echo_likelihood_recent_max); + ASSERT_TRUE(stats.residual_echo_likelihood_recent_max.has_value()); EXPECT_GE(*stats.residual_echo_likelihood_recent_max, 0.0); EXPECT_LE(*stats.residual_echo_likelihood_recent_max, 1.0); - ASSERT_TRUE(stats.echo_return_loss); + ASSERT_TRUE(stats.echo_return_loss.has_value()); EXPECT_NE(*stats.echo_return_loss, -100.0); - ASSERT_TRUE(stats.echo_return_loss_enhancement); + ASSERT_TRUE(stats.echo_return_loss_enhancement.has_value()); EXPECT_NE(*stats.echo_return_loss_enhancement, -100.0); } TEST(MAYBE_ApmStatistics, AECMEnabledTest) { // Set up APM with AECM and process some audio. - std::unique_ptr apm = CreateApm(true); + rtc::scoped_refptr apm = CreateApm(true); ASSERT_TRUE(apm); // Set up an audioframe. @@ -2823,24 +2726,19 @@ TEST(MAYBE_ApmStatistics, AECMEnabledTest) { AudioProcessingStats stats = apm->GetStatistics(); // We expect only the residual echo detector statistics to be set and have a // sensible value. - EXPECT_TRUE(stats.residual_echo_likelihood); - if (stats.residual_echo_likelihood) { - EXPECT_GE(*stats.residual_echo_likelihood, 0.0); - EXPECT_LE(*stats.residual_echo_likelihood, 1.0); - } - EXPECT_TRUE(stats.residual_echo_likelihood_recent_max); - if (stats.residual_echo_likelihood_recent_max) { - EXPECT_GE(*stats.residual_echo_likelihood_recent_max, 0.0); - EXPECT_LE(*stats.residual_echo_likelihood_recent_max, 1.0); - } - EXPECT_FALSE(stats.echo_return_loss); - EXPECT_FALSE(stats.echo_return_loss_enhancement); + ASSERT_TRUE(stats.residual_echo_likelihood.has_value()); + EXPECT_GE(*stats.residual_echo_likelihood, 0.0); + EXPECT_LE(*stats.residual_echo_likelihood, 1.0); + ASSERT_TRUE(stats.residual_echo_likelihood_recent_max.has_value()); + EXPECT_GE(*stats.residual_echo_likelihood_recent_max, 0.0); + EXPECT_LE(*stats.residual_echo_likelihood_recent_max, 1.0); + EXPECT_FALSE(stats.echo_return_loss.has_value()); + EXPECT_FALSE(stats.echo_return_loss_enhancement.has_value()); } -TEST(ApmStatistics, ReportOutputRmsDbfs) { +TEST(ApmStatistics, DoNotReportVoiceDetectedStat) { ProcessingConfig processing_config = { {{32000, 1}, {32000, 1}, {32000, 1}, {32000, 1}}}; - AudioProcessing::Config config; // Set up an audioframe. 
Int16FrameData frame; @@ -2853,95 +2751,57 @@ TEST(ApmStatistics, ReportOutputRmsDbfs) { ptr[i] = 10000 * ((i % 3) - 1); } - std::unique_ptr apm( - AudioProcessingBuilderForTesting().Create()); + rtc::scoped_refptr apm = + AudioProcessingBuilderForTesting().Create(); apm->Initialize(processing_config); - // If not enabled, no metric should be reported. - EXPECT_EQ( - apm->ProcessStream(frame.data.data(), - StreamConfig(frame.sample_rate_hz, frame.num_channels), - StreamConfig(frame.sample_rate_hz, frame.num_channels), - frame.data.data()), - 0); - EXPECT_FALSE(apm->GetStatistics().output_rms_dbfs); - - // If enabled, metrics should be reported. - config.level_estimation.enabled = true; - apm->ApplyConfig(config); - EXPECT_EQ( - apm->ProcessStream(frame.data.data(), - StreamConfig(frame.sample_rate_hz, frame.num_channels), - StreamConfig(frame.sample_rate_hz, frame.num_channels), - frame.data.data()), - 0); - auto stats = apm->GetStatistics(); - EXPECT_TRUE(stats.output_rms_dbfs); - EXPECT_GE(*stats.output_rms_dbfs, 0); - - // If re-disabled, the value is again not reported. - config.level_estimation.enabled = false; - apm->ApplyConfig(config); + // No metric should be reported. EXPECT_EQ( apm->ProcessStream(frame.data.data(), StreamConfig(frame.sample_rate_hz, frame.num_channels), StreamConfig(frame.sample_rate_hz, frame.num_channels), frame.data.data()), 0); - EXPECT_FALSE(apm->GetStatistics().output_rms_dbfs); + EXPECT_FALSE(apm->GetStatistics().voice_detected.has_value()); } -TEST(ApmStatistics, ReportHasVoice) { - ProcessingConfig processing_config = { - {{32000, 1}, {32000, 1}, {32000, 1}, {32000, 1}}}; - AudioProcessing::Config config; - - // Set up an audioframe. +TEST(ApmStatistics, GetStatisticsReportsNoEchoDetectorStatsWhenDisabled) { + rtc::scoped_refptr apm = + AudioProcessingBuilderForTesting().Create(); Int16FrameData frame; frame.num_channels = 1; SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz); - - // Fill the audio frame with a sawtooth pattern. - int16_t* ptr = frame.data.data(); - for (size_t i = 0; i < frame.kMaxDataSizeSamples; i++) { - ptr[i] = 10000 * ((i % 3) - 1); - } - - std::unique_ptr apm( - AudioProcessingBuilderForTesting().Create()); - apm->Initialize(processing_config); - - // If not enabled, no metric should be reported. - EXPECT_EQ( - apm->ProcessStream(frame.data.data(), - StreamConfig(frame.sample_rate_hz, frame.num_channels), - StreamConfig(frame.sample_rate_hz, frame.num_channels), - frame.data.data()), - 0); - EXPECT_FALSE(apm->GetStatistics().voice_detected); - - // If enabled, metrics should be reported. - config.voice_detection.enabled = true; - apm->ApplyConfig(config); - EXPECT_EQ( + ASSERT_EQ( apm->ProcessStream(frame.data.data(), StreamConfig(frame.sample_rate_hz, frame.num_channels), StreamConfig(frame.sample_rate_hz, frame.num_channels), frame.data.data()), 0); - auto stats = apm->GetStatistics(); - EXPECT_TRUE(stats.voice_detected); + // Echo detector is disabled by default, no stats reported. + AudioProcessingStats stats = apm->GetStatistics(); + EXPECT_FALSE(stats.residual_echo_likelihood.has_value()); + EXPECT_FALSE(stats.residual_echo_likelihood_recent_max.has_value()); +} - // If re-disabled, the value is again not reported. - config.voice_detection.enabled = false; - apm->ApplyConfig(config); - EXPECT_EQ( +TEST(ApmStatistics, GetStatisticsReportsEchoDetectorStatsWhenEnabled) { + // Create APM with an echo detector injected. 
+ rtc::scoped_refptr apm = + AudioProcessingBuilderForTesting() + .SetEchoDetector(CreateEchoDetector()) + .Create(); + Int16FrameData frame; + frame.num_channels = 1; + SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate32kHz); + // Echo detector enabled: Report stats. + ASSERT_EQ( apm->ProcessStream(frame.data.data(), StreamConfig(frame.sample_rate_hz, frame.num_channels), StreamConfig(frame.sample_rate_hz, frame.num_channels), frame.data.data()), 0); - EXPECT_FALSE(apm->GetStatistics().voice_detected); + AudioProcessingStats stats = apm->GetStatistics(); + EXPECT_TRUE(stats.residual_echo_likelihood.has_value()); + EXPECT_TRUE(stats.residual_echo_likelihood_recent_max.has_value()); } TEST(ApmConfiguration, HandlingOfRateAndChannelCombinations) { @@ -3003,14 +2863,6 @@ TEST(AudioProcessing, GainController1ConfigEqual) { b.enable_limiter = a.enable_limiter; EXPECT_EQ(a, b); - a.analog_level_minimum++; - b.analog_level_minimum = a.analog_level_minimum; - EXPECT_EQ(a, b); - - a.analog_level_maximum--; - b.analog_level_maximum = a.analog_level_maximum; - EXPECT_EQ(a, b); - auto& a_analog = a.analog_gain_controller; auto& b_analog = b.analog_gain_controller; @@ -3057,14 +2909,6 @@ TEST(AudioProcessing, GainController1ConfigNotEqual) { EXPECT_NE(a, b); a = b; - a.analog_level_minimum++; - EXPECT_NE(a, b); - a = b; - - a.analog_level_maximum--; - EXPECT_NE(a, b); - a = b; - auto& a_analog = a.analog_gain_controller; const auto& b_analog = b.analog_gain_controller; @@ -3109,9 +2953,16 @@ TEST(AudioProcessing, GainController2ConfigEqual) { b_adaptive.dry_run = a_adaptive.dry_run; EXPECT_EQ(a, b); - a_adaptive.noise_estimator = AudioProcessing::Config::GainController2:: - NoiseEstimator::kStationaryNoise; - b_adaptive.noise_estimator = a_adaptive.noise_estimator; + a_adaptive.headroom_db += 1.0f; + b_adaptive.headroom_db = a_adaptive.headroom_db; + EXPECT_EQ(a, b); + + a_adaptive.max_gain_db += 1.0f; + b_adaptive.max_gain_db = a_adaptive.max_gain_db; + EXPECT_EQ(a, b); + + a_adaptive.initial_gain_db += 1.0f; + b_adaptive.initial_gain_db = a_adaptive.initial_gain_db; EXPECT_EQ(a, b); a_adaptive.vad_reset_period_ms++; @@ -3132,18 +2983,6 @@ TEST(AudioProcessing, GainController2ConfigEqual) { b_adaptive.max_output_noise_level_dbfs = a_adaptive.max_output_noise_level_dbfs; EXPECT_EQ(a, b); - - Toggle(a_adaptive.sse2_allowed); - b_adaptive.sse2_allowed = a_adaptive.sse2_allowed; - EXPECT_EQ(a, b); - - Toggle(a_adaptive.avx2_allowed); - b_adaptive.avx2_allowed = a_adaptive.avx2_allowed; - EXPECT_EQ(a, b); - - Toggle(a_adaptive.neon_allowed); - b_adaptive.neon_allowed = a_adaptive.neon_allowed; - EXPECT_EQ(a, b); } // Checks that one differing parameter is sufficient to make two configs @@ -3171,36 +3010,31 @@ TEST(AudioProcessing, GainController2ConfigNotEqual) { EXPECT_NE(a, b); a_adaptive = b_adaptive; - a_adaptive.noise_estimator = AudioProcessing::Config::GainController2:: - NoiseEstimator::kStationaryNoise; + a_adaptive.headroom_db += 1.0f; EXPECT_NE(a, b); a_adaptive = b_adaptive; - a_adaptive.vad_reset_period_ms++; - EXPECT_NE(a, b); - a_adaptive = b_adaptive; - - a_adaptive.adjacent_speech_frames_threshold++; + a_adaptive.max_gain_db += 1.0f; EXPECT_NE(a, b); a_adaptive = b_adaptive; - a_adaptive.max_gain_change_db_per_second += 1.0f; + a_adaptive.initial_gain_db += 1.0f; EXPECT_NE(a, b); a_adaptive = b_adaptive; - a_adaptive.max_output_noise_level_dbfs += 1.0f; + a_adaptive.vad_reset_period_ms++; EXPECT_NE(a, b); a_adaptive = b_adaptive; - Toggle(a_adaptive.sse2_allowed); + 
a_adaptive.adjacent_speech_frames_threshold++; EXPECT_NE(a, b); a_adaptive = b_adaptive; - Toggle(a_adaptive.avx2_allowed); + a_adaptive.max_gain_change_db_per_second += 1.0f; EXPECT_NE(a, b); a_adaptive = b_adaptive; - Toggle(a_adaptive.neon_allowed); + a_adaptive.max_output_noise_level_dbfs += 1.0f; EXPECT_NE(a, b); a_adaptive = b_adaptive; } diff --git a/modules/audio_processing/common.h b/modules/audio_processing/common.h deleted file mode 100644 index 2c88c4e46c..0000000000 --- a/modules/audio_processing/common.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef MODULES_AUDIO_PROCESSING_COMMON_H_ -#define MODULES_AUDIO_PROCESSING_COMMON_H_ - -#include "modules/audio_processing/include/audio_processing.h" -#include "rtc_base/checks.h" - -namespace webrtc { - -constexpr int RuntimeSettingQueueSize() { - return 100; -} - -static inline size_t ChannelsFromLayout(AudioProcessing::ChannelLayout layout) { - switch (layout) { - case AudioProcessing::kMono: - case AudioProcessing::kMonoAndKeyboard: - return 1; - case AudioProcessing::kStereo: - case AudioProcessing::kStereoAndKeyboard: - return 2; - } - RTC_NOTREACHED(); - return 0; -} - -} // namespace webrtc - -#endif // MODULES_AUDIO_PROCESSING_COMMON_H_ diff --git a/modules/audio_processing/config_unittest.cc b/modules/audio_processing/config_unittest.cc deleted file mode 100644 index 19e9ab37d2..0000000000 --- a/modules/audio_processing/config_unittest.cc +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ -#include "modules/audio_processing/include/config.h" - -#include "test/gtest.h" - -namespace webrtc { -namespace { - -struct MyExperiment { - static const ConfigOptionID identifier = ConfigOptionID::kMyExperimentForTest; - static const int kDefaultFactor; - static const int kDefaultOffset; - - MyExperiment() : factor(kDefaultFactor), offset(kDefaultOffset) {} - - MyExperiment(int factor, int offset) : factor(factor), offset(offset) {} - - int factor; - int offset; -}; - -const int MyExperiment::kDefaultFactor = 1; -const int MyExperiment::kDefaultOffset = 2; - -TEST(Config, ReturnsDefaultInstanceIfNotConfigured) { - Config config; - const MyExperiment& my_exp = config.Get(); - EXPECT_EQ(MyExperiment::kDefaultFactor, my_exp.factor); - EXPECT_EQ(MyExperiment::kDefaultOffset, my_exp.offset); -} - -TEST(Config, ReturnOptionWhenSet) { - Config config; - config.Set(new MyExperiment(5, 1)); - const MyExperiment& my_exp = config.Get(); - EXPECT_EQ(5, my_exp.factor); - EXPECT_EQ(1, my_exp.offset); -} - -TEST(Config, SetNullSetsTheOptionBackToDefault) { - Config config; - config.Set(new MyExperiment(5, 1)); - config.Set(NULL); - const MyExperiment& my_exp = config.Get(); - EXPECT_EQ(MyExperiment::kDefaultFactor, my_exp.factor); - EXPECT_EQ(MyExperiment::kDefaultOffset, my_exp.offset); -} - -struct Algo1_CostFunction { - static const ConfigOptionID identifier = - ConfigOptionID::kAlgo1CostFunctionForTest; - Algo1_CostFunction() {} - - virtual int cost(int x) const { return x; } - - virtual ~Algo1_CostFunction() {} -}; - -struct SqrCost : Algo1_CostFunction { - virtual int cost(int x) const { return x * x; } -}; - -TEST(Config, SupportsPolymorphism) { - Config config; - config.Set(new SqrCost()); - EXPECT_EQ(25, config.Get().cost(5)); -} -} // namespace -} // namespace webrtc diff --git a/modules/audio_processing/echo_control_mobile_bit_exact_unittest.cc b/modules/audio_processing/echo_control_mobile_bit_exact_unittest.cc index 41a8cb8cee..f351811e08 100644 --- a/modules/audio_processing/echo_control_mobile_bit_exact_unittest.cc +++ b/modules/audio_processing/echo_control_mobile_bit_exact_unittest.cc @@ -68,7 +68,7 @@ void RunBitexactnessTest(int sample_rate_hz, &echo_control_mobile); const int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100); - const StreamConfig render_config(sample_rate_hz, num_channels, false); + const StreamConfig render_config(sample_rate_hz, num_channels); AudioBuffer render_buffer( render_config.sample_rate_hz(), render_config.num_channels(), render_config.sample_rate_hz(), 1, render_config.sample_rate_hz(), 1); @@ -76,7 +76,7 @@ void RunBitexactnessTest(int sample_rate_hz, test::GetApmRenderTestVectorFileName(sample_rate_hz)); std::vector render_input(samples_per_channel * num_channels); - const StreamConfig capture_config(sample_rate_hz, num_channels, false); + const StreamConfig capture_config(sample_rate_hz, num_channels); AudioBuffer capture_buffer( capture_config.sample_rate_hz(), capture_config.num_channels(), capture_config.sample_rate_hz(), 1, capture_config.sample_rate_hz(), 1); diff --git a/modules/audio_processing/echo_control_mobile_impl.cc b/modules/audio_processing/echo_control_mobile_impl.cc index 8116608390..fa5cb8ffec 100644 --- a/modules/audio_processing/echo_control_mobile_impl.cc +++ b/modules/audio_processing/echo_control_mobile_impl.cc @@ -18,7 +18,6 @@ #include "modules/audio_processing/audio_buffer.h" #include "modules/audio_processing/include/audio_processing.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" 
namespace webrtc { @@ -36,7 +35,7 @@ int16_t MapSetting(EchoControlMobileImpl::RoutingMode mode) { case EchoControlMobileImpl::kLoudSpeakerphone: return 4; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return -1; } @@ -85,6 +84,9 @@ class EchoControlMobileImpl::Canceller { WebRtcAecm_Free(state_); } + Canceller(const Canceller&) = delete; + Canceller& operator=(const Canceller&) = delete; + void* state() { RTC_DCHECK(state_); return state_; @@ -98,7 +100,6 @@ class EchoControlMobileImpl::Canceller { private: void* state_; - RTC_DISALLOW_COPY_AND_ASSIGN(Canceller); }; EchoControlMobileImpl::EchoControlMobileImpl() diff --git a/modules/audio_processing/echo_control_mobile_impl.h b/modules/audio_processing/echo_control_mobile_impl.h index 23f3c06121..f7f2626a0e 100644 --- a/modules/audio_processing/echo_control_mobile_impl.h +++ b/modules/audio_processing/echo_control_mobile_impl.h @@ -42,7 +42,7 @@ class EchoControlMobileImpl { kLoudSpeakerphone }; - // Sets echo control appropriate for the audio routing |mode| on the device. + // Sets echo control appropriate for the audio routing `mode` on the device. // It can and should be updated during a call if the audio routing changes. int set_routing_mode(RoutingMode mode); RoutingMode routing_mode() const; diff --git a/modules/audio_processing/g3doc/audio_processing_module.md b/modules/audio_processing/g3doc/audio_processing_module.md index bb80dc9882..fc63b34dfe 100644 --- a/modules/audio_processing/g3doc/audio_processing_module.md +++ b/modules/audio_processing/g3doc/audio_processing_module.md @@ -10,8 +10,8 @@ microphone signal. These effects are required for VoIP calling and some examples include echo cancellation (AEC), noise suppression (NS) and automatic gain control (AGC). -The API for APM resides in [`/modules/audio_processing/include`][https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_processing/include]. -APM is created using the [`AudioProcessingBuilder`][https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/audio_processing/include/audio_processing.h] +The API for APM resides in [`/modules/audio_processing/include`][https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_processing/include]. +APM is created using the [`AudioProcessingBuilder`][https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/modules/audio_processing/include/audio_processing.h] builder that allows it to be customized and configured. 
Some specific aspects of APM include that: diff --git a/modules/audio_processing/gain_control_impl.cc b/modules/audio_processing/gain_control_impl.cc index b5454c05ed..3fac1f7f56 100644 --- a/modules/audio_processing/gain_control_impl.cc +++ b/modules/audio_processing/gain_control_impl.cc @@ -35,7 +35,7 @@ int16_t MapSetting(GainControl::Mode mode) { case GainControl::kFixedDigital: return kAgcModeFixedDigital; } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return -1; } diff --git a/modules/audio_processing/gain_control_unittest.cc b/modules/audio_processing/gain_control_unittest.cc index 6e0149915c..1662dc506f 100644 --- a/modules/audio_processing/gain_control_unittest.cc +++ b/modules/audio_processing/gain_control_unittest.cc @@ -77,7 +77,7 @@ void RunBitExactnessTest(int sample_rate_hz, analog_level_max, &gain_controller); const int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100); - const StreamConfig render_config(sample_rate_hz, num_channels, false); + const StreamConfig render_config(sample_rate_hz, num_channels); AudioBuffer render_buffer( render_config.sample_rate_hz(), render_config.num_channels(), render_config.sample_rate_hz(), 1, render_config.sample_rate_hz(), 1); @@ -85,7 +85,7 @@ void RunBitExactnessTest(int sample_rate_hz, test::GetApmRenderTestVectorFileName(sample_rate_hz)); std::vector render_input(samples_per_channel * num_channels); - const StreamConfig capture_config(sample_rate_hz, num_channels, false); + const StreamConfig capture_config(sample_rate_hz, num_channels); AudioBuffer capture_buffer( capture_config.sample_rate_hz(), capture_config.num_channels(), capture_config.sample_rate_hz(), 1, capture_config.sample_rate_hz(), 1); diff --git a/modules/audio_processing/gain_controller2.cc b/modules/audio_processing/gain_controller2.cc index 74b63c9432..466e4b0eb4 100644 --- a/modules/audio_processing/gain_controller2.cc +++ b/modules/audio_processing/gain_controller2.cc @@ -10,7 +10,11 @@ #include "modules/audio_processing/gain_controller2.h" +#include +#include + #include "common_audio/include/audio_util.h" +#include "modules/audio_processing/agc2/cpu_features.h" #include "modules/audio_processing/audio_buffer.h" #include "modules/audio_processing/include/audio_frame_view.h" #include "modules/audio_processing/logging/apm_data_dumper.h" @@ -18,20 +22,76 @@ #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" +#include "system_wrappers/include/field_trial.h" namespace webrtc { +namespace { + +using Agc2Config = AudioProcessing::Config::GainController2; + +constexpr int kUnspecifiedAnalogLevel = -1; +constexpr int kLogLimiterStatsPeriodMs = 30'000; +constexpr int kFrameLengthMs = 10; +constexpr int kLogLimiterStatsPeriodNumFrames = + kLogLimiterStatsPeriodMs / kFrameLengthMs; + +// Detects the available CPU features and applies any kill-switches. +AvailableCpuFeatures GetAllowedCpuFeatures() { + AvailableCpuFeatures features = GetAvailableCpuFeatures(); + if (field_trial::IsEnabled("WebRTC-Agc2SimdSse2KillSwitch")) { + features.sse2 = false; + } + if (field_trial::IsEnabled("WebRTC-Agc2SimdAvx2KillSwitch")) { + features.avx2 = false; + } + if (field_trial::IsEnabled("WebRTC-Agc2SimdNeonKillSwitch")) { + features.neon = false; + } + return features; +} + +// Creates an adaptive digital gain controller if enabled. 
+std::unique_ptr CreateAdaptiveDigitalController( + const Agc2Config::AdaptiveDigital& config, + int sample_rate_hz, + int num_channels, + ApmDataDumper* data_dumper) { + if (config.enabled) { + return std::make_unique( + data_dumper, config, sample_rate_hz, num_channels); + } + return nullptr; +} + +} // namespace int GainController2::instance_count_ = 0; -GainController2::GainController2() - : data_dumper_(rtc::AtomicOps::Increment(&instance_count_)), - gain_applier_(/*hard_clip_samples=*/false, - /*initial_gain_factor=*/0.0f), - limiter_(static_cast(48000), &data_dumper_, "Agc2"), - calls_since_last_limiter_log_(0) { - if (config_.adaptive_digital.enabled) { - adaptive_agc_ = - std::make_unique(&data_dumper_, config_.adaptive_digital); +GainController2::GainController2(const Agc2Config& config, + int sample_rate_hz, + int num_channels) + : cpu_features_(GetAllowedCpuFeatures()), + data_dumper_(rtc::AtomicOps::Increment(&instance_count_)), + fixed_gain_applier_( + /*hard_clip_samples=*/false, + /*initial_gain_factor=*/DbToRatio(config.fixed_digital.gain_db)), + adaptive_digital_controller_( + CreateAdaptiveDigitalController(config.adaptive_digital, + sample_rate_hz, + num_channels, + &data_dumper_)), + limiter_(sample_rate_hz, &data_dumper_, /*histogram_name_prefix=*/"Agc2"), + calls_since_last_limiter_log_(0), + analog_level_(kUnspecifiedAnalogLevel) { + RTC_DCHECK(Validate(config)); + data_dumper_.InitiateNewSetOfRecordings(); + const bool use_vad = config.adaptive_digital.enabled; + if (use_vad) { + // TODO(bugs.webrtc.org/7494): Move `vad_reset_period_ms` from adaptive + // digital to gain controller 2 config. + vad_ = std::make_unique( + config.adaptive_digital.vad_reset_period_ms, cpu_features_, + sample_rate_hz); } } @@ -42,29 +102,48 @@ void GainController2::Initialize(int sample_rate_hz, int num_channels) { sample_rate_hz == AudioProcessing::kSampleRate16kHz || sample_rate_hz == AudioProcessing::kSampleRate32kHz || sample_rate_hz == AudioProcessing::kSampleRate48kHz); + // TODO(bugs.webrtc.org/7494): Initialize `fixed_gain_applier_`. limiter_.SetSampleRate(sample_rate_hz); - if (adaptive_agc_) { - adaptive_agc_->Initialize(sample_rate_hz, num_channels); + if (vad_) { + vad_->Initialize(sample_rate_hz); + } + if (adaptive_digital_controller_) { + adaptive_digital_controller_->Initialize(sample_rate_hz, num_channels); } data_dumper_.InitiateNewSetOfRecordings(); - data_dumper_.DumpRaw("sample_rate_hz", sample_rate_hz); calls_since_last_limiter_log_ = 0; + analog_level_ = kUnspecifiedAnalogLevel; +} + +void GainController2::SetFixedGainDb(float gain_db) { + const float gain_factor = DbToRatio(gain_db); + if (fixed_gain_applier_.GetGainFactor() != gain_factor) { + // Reset the limiter to quickly react on abrupt level changes caused by + // large changes of the fixed gain. + limiter_.Reset(); + } + fixed_gain_applier_.SetGainFactor(gain_factor); } void GainController2::Process(AudioBuffer* audio) { data_dumper_.DumpRaw("agc2_notified_analog_level", analog_level_); AudioFrameView float_frame(audio->channels(), audio->num_channels(), audio->num_frames()); - // Apply fixed gain first, then the adaptive one. 
- gain_applier_.ApplyGain(float_frame); - if (adaptive_agc_) { - adaptive_agc_->Process(float_frame, limiter_.LastAudioLevel()); + absl::optional speech_probability; + if (vad_) { + speech_probability = vad_->Analyze(float_frame); + data_dumper_.DumpRaw("agc2_speech_probability", speech_probability.value()); + } + fixed_gain_applier_.ApplyGain(float_frame); + if (adaptive_digital_controller_) { + RTC_DCHECK(speech_probability.has_value()); + adaptive_digital_controller_->Process( + float_frame, speech_probability.value(), limiter_.LastAudioLevel()); } limiter_.Process(float_frame); - // Log limiter stats every 30 seconds. - ++calls_since_last_limiter_log_; - if (calls_since_last_limiter_log_ == 3000) { + // Periodically log limiter stats. + if (++calls_since_last_limiter_log_ == kLogLimiterStatsPeriodNumFrames) { calls_since_last_limiter_log_ = 0; InterpolatedGainCurve::Stats stats = limiter_.GetGainCurveStats(); RTC_LOG(LS_INFO) << "AGC2 limiter stats" @@ -76,46 +155,21 @@ void GainController2::Process(AudioBuffer* audio) { } void GainController2::NotifyAnalogLevel(int level) { - if (analog_level_ != level && adaptive_agc_) { - adaptive_agc_->HandleInputGainChange(); + if (analog_level_ != level && adaptive_digital_controller_) { + adaptive_digital_controller_->HandleInputGainChange(); } analog_level_ = level; } -void GainController2::ApplyConfig( - const AudioProcessing::Config::GainController2& config) { - RTC_DCHECK(Validate(config)); - - config_ = config; - if (config.fixed_digital.gain_db != config_.fixed_digital.gain_db) { - // Reset the limiter to quickly react on abrupt level changes caused by - // large changes of the fixed gain. - limiter_.Reset(); - } - gain_applier_.SetGainFactor(DbToRatio(config_.fixed_digital.gain_db)); - if (config_.adaptive_digital.enabled) { - adaptive_agc_ = - std::make_unique(&data_dumper_, config_.adaptive_digital); - } else { - adaptive_agc_.reset(); - } -} - bool GainController2::Validate( const AudioProcessing::Config::GainController2& config) { const auto& fixed = config.fixed_digital; const auto& adaptive = config.adaptive_digital; - return fixed.gain_db >= 0.f && fixed.gain_db < 50.f && - adaptive.vad_probability_attack > 0.f && - adaptive.vad_probability_attack <= 1.f && - adaptive.level_estimator_adjacent_speech_frames_threshold >= 1 && - adaptive.initial_saturation_margin_db >= 0.f && - adaptive.initial_saturation_margin_db <= 100.f && - adaptive.extra_saturation_margin_db >= 0.f && - adaptive.extra_saturation_margin_db <= 100.f && - adaptive.gain_applier_adjacent_speech_frames_threshold >= 1 && - adaptive.max_gain_change_db_per_second > 0.f && - adaptive.max_output_noise_level_dbfs <= 0.f; + return fixed.gain_db >= 0.0f && fixed.gain_db < 50.f && + adaptive.headroom_db >= 0.0f && adaptive.max_gain_db > 0.0f && + adaptive.initial_gain_db >= 0.0f && + adaptive.max_gain_change_db_per_second > 0.0f && + adaptive.max_output_noise_level_dbfs <= 0.0f; } } // namespace webrtc diff --git a/modules/audio_processing/gain_controller2.h b/modules/audio_processing/gain_controller2.h index ce758c7834..8c82d745b5 100644 --- a/modules/audio_processing/gain_controller2.h +++ b/modules/audio_processing/gain_controller2.h @@ -14,12 +14,13 @@ #include #include -#include "modules/audio_processing/agc2/adaptive_agc.h" +#include "modules/audio_processing/agc2/adaptive_digital_gain_controller.h" +#include "modules/audio_processing/agc2/cpu_features.h" #include "modules/audio_processing/agc2/gain_applier.h" #include "modules/audio_processing/agc2/limiter.h" +#include 
"modules/audio_processing/agc2/vad_wrapper.h" #include "modules/audio_processing/include/audio_processing.h" #include "modules/audio_processing/logging/apm_data_dumper.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -29,27 +30,37 @@ class AudioBuffer; // microphone gain and/or applying digital gain. class GainController2 { public: - GainController2(); + GainController2(const AudioProcessing::Config::GainController2& config, + int sample_rate_hz, + int num_channels); GainController2(const GainController2&) = delete; GainController2& operator=(const GainController2&) = delete; ~GainController2(); + // Detects and handles changes of sample rate and/or number of channels. void Initialize(int sample_rate_hz, int num_channels); + + // Sets the fixed digital gain. + void SetFixedGainDb(float gain_db); + + // Applies fixed and adaptive digital gains to `audio` and runs a limiter. void Process(AudioBuffer* audio); + + // Handles analog level changes. void NotifyAnalogLevel(int level); - void ApplyConfig(const AudioProcessing::Config::GainController2& config); static bool Validate(const AudioProcessing::Config::GainController2& config); private: static int instance_count_; + const AvailableCpuFeatures cpu_features_; ApmDataDumper data_dumper_; - AudioProcessing::Config::GainController2 config_; - GainApplier gain_applier_; - std::unique_ptr adaptive_agc_; + GainApplier fixed_gain_applier_; + std::unique_ptr vad_; + std::unique_ptr adaptive_digital_controller_; Limiter limiter_; int calls_since_last_limiter_log_; - int analog_level_ = -1; + int analog_level_; }; } // namespace webrtc diff --git a/modules/audio_processing/gain_controller2_unittest.cc b/modules/audio_processing/gain_controller2_unittest.cc index 85c08bb750..850562f8dd 100644 --- a/modules/audio_processing/gain_controller2_unittest.cc +++ b/modules/audio_processing/gain_controller2_unittest.cc @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include "api/array_view.h" #include "modules/audio_processing/agc2/agc2_testing_common.h" @@ -26,212 +28,138 @@ namespace webrtc { namespace test { namespace { -void SetAudioBufferSamples(float value, AudioBuffer* ab) { - // Sets all the samples in |ab| to |value|. - for (size_t k = 0; k < ab->num_channels(); ++k) { - std::fill(ab->channels()[k], ab->channels()[k] + ab->num_frames(), value); +using Agc2Config = AudioProcessing::Config::GainController2; + +// Sets all the samples in `ab` to `value`. +void SetAudioBufferSamples(float value, AudioBuffer& ab) { + for (size_t k = 0; k < ab.num_channels(); ++k) { + std::fill(ab.channels()[k], ab.channels()[k] + ab.num_frames(), value); } } -float RunAgc2WithConstantInput(GainController2* agc2, +float RunAgc2WithConstantInput(GainController2& agc2, float input_level, - size_t num_frames, - int sample_rate) { - const int num_samples = rtc::CheckedDivExact(sample_rate, 100); - AudioBuffer ab(sample_rate, 1, sample_rate, 1, sample_rate, 1); + int num_frames, + int sample_rate_hz) { + const int num_samples = rtc::CheckedDivExact(sample_rate_hz, 100); + AudioBuffer ab(sample_rate_hz, 1, sample_rate_hz, 1, sample_rate_hz, 1); // Give time to the level estimator to converge. - for (size_t i = 0; i < num_frames + 1; ++i) { - SetAudioBufferSamples(input_level, &ab); - agc2->Process(&ab); + for (int i = 0; i < num_frames + 1; ++i) { + SetAudioBufferSamples(input_level, ab); + agc2.Process(&ab); } // Return the last sample from the last processed frame. 
diff --git a/modules/audio_processing/gain_controller2_unittest.cc b/modules/audio_processing/gain_controller2_unittest.cc index 85c08bb750..850562f8dd 100644 --- a/modules/audio_processing/gain_controller2_unittest.cc +++ b/modules/audio_processing/gain_controller2_unittest.cc @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include "api/array_view.h" #include "modules/audio_processing/agc2/agc2_testing_common.h" @@ -26,212 +28,138 @@ namespace webrtc { namespace test { namespace { -void SetAudioBufferSamples(float value, AudioBuffer* ab) { - // Sets all the samples in |ab| to |value|. - for (size_t k = 0; k < ab->num_channels(); ++k) { - std::fill(ab->channels()[k], ab->channels()[k] + ab->num_frames(), value); +using Agc2Config = AudioProcessing::Config::GainController2; + +// Sets all the samples in `ab` to `value`. +void SetAudioBufferSamples(float value, AudioBuffer& ab) { + for (size_t k = 0; k < ab.num_channels(); ++k) { + std::fill(ab.channels()[k], ab.channels()[k] + ab.num_frames(), value); } } -float RunAgc2WithConstantInput(GainController2* agc2, +float RunAgc2WithConstantInput(GainController2& agc2, float input_level, - size_t num_frames, - int sample_rate) { - const int num_samples = rtc::CheckedDivExact(sample_rate, 100); - AudioBuffer ab(sample_rate, 1, sample_rate, 1, sample_rate, 1); + int num_frames, + int sample_rate_hz) { + const int num_samples = rtc::CheckedDivExact(sample_rate_hz, 100); + AudioBuffer ab(sample_rate_hz, 1, sample_rate_hz, 1, sample_rate_hz, 1); // Give time to the level estimator to converge. - for (size_t i = 0; i < num_frames + 1; ++i) { - SetAudioBufferSamples(input_level, &ab); - agc2->Process(&ab); + for (int i = 0; i < num_frames + 1; ++i) { + SetAudioBufferSamples(input_level, ab); + agc2.Process(&ab); } // Return the last sample from the last processed frame. return ab.channels()[0][num_samples - 1]; } -AudioProcessing::Config::GainController2 CreateAgc2FixedDigitalModeConfig( - float fixed_gain_db) { - AudioProcessing::Config::GainController2 config; +std::unique_ptr<GainController2> CreateAgc2FixedDigitalMode( + float fixed_gain_db, + int sample_rate_hz) { + Agc2Config config; config.adaptive_digital.enabled = false; config.fixed_digital.gain_db = fixed_gain_db; - // TODO(alessiob): Check why ASSERT_TRUE() below does not compile. EXPECT_TRUE(GainController2::Validate(config)); - return config; -} - -std::unique_ptr<GainController2> CreateAgc2FixedDigitalMode( - float fixed_gain_db, - size_t sample_rate_hz) { - auto agc2 = std::make_unique<GainController2>(); - agc2->ApplyConfig(CreateAgc2FixedDigitalModeConfig(fixed_gain_db)); - agc2->Initialize(sample_rate_hz, /*num_channels=*/1); - return agc2; -} - -float GainDbAfterProcessingFile(GainController2& gain_controller, - int max_duration_ms) { - // Set up an AudioBuffer to be filled from the speech file. - constexpr size_t kStereo = 2u; - const StreamConfig capture_config(AudioProcessing::kSampleRate48kHz, kStereo, - false); - AudioBuffer ab(capture_config.sample_rate_hz(), capture_config.num_channels(), - capture_config.sample_rate_hz(), capture_config.num_channels(), - capture_config.sample_rate_hz(), - capture_config.num_channels()); - test::InputAudioFile capture_file( - test::GetApmCaptureTestVectorFileName(AudioProcessing::kSampleRate48kHz)); - std::vector<float> capture_input(capture_config.num_frames() * - capture_config.num_channels()); - - // Process the input file which must be long enough to cover - // `max_duration_ms`. - RTC_DCHECK_GT(max_duration_ms, 0); - const int num_frames = rtc::CheckedDivExact(max_duration_ms, 10); - for (int i = 0; i < num_frames; ++i) { - ReadFloatSamplesFromStereoFile(capture_config.num_frames(), - capture_config.num_channels(), &capture_file, - capture_input); - test::CopyVectorToAudioBuffer(capture_config, capture_input, &ab); - gain_controller.Process(&ab); - } - - // Send in a last frame with minimum dBFS level. - constexpr float sample_value = 1.f; - SetAudioBufferSamples(sample_value, &ab); - gain_controller.Process(&ab); - // Measure the RMS level after processing. - float rms = 0.0f; - for (size_t i = 0; i < capture_config.num_frames(); ++i) { - rms += ab.channels()[0][i] * ab.channels()[0][i]; - } - // Return the applied gain in dB. - return 20.0f * std::log10(std::sqrt(rms / capture_config.num_frames())); + return std::make_unique<GainController2>(config, sample_rate_hz, + /*num_channels=*/1); } } // namespace TEST(GainController2, CheckDefaultConfig) { - AudioProcessing::Config::GainController2 config; + Agc2Config config; EXPECT_TRUE(GainController2::Validate(config)); } TEST(GainController2, CheckFixedDigitalConfig) { - AudioProcessing::Config::GainController2 config; + Agc2Config config; // Attenuation is not allowed. - config.fixed_digital.gain_db = -5.f; + config.fixed_digital.gain_db = -5.0f; EXPECT_FALSE(GainController2::Validate(config)); // No gain is allowed. - config.fixed_digital.gain_db = 0.f; + config.fixed_digital.gain_db = 0.0f; EXPECT_TRUE(GainController2::Validate(config)); // Positive gain is allowed. - config.fixed_digital.gain_db = 15.f; + config.fixed_digital.gain_db = 15.0f; EXPECT_TRUE(GainController2::Validate(config)); } -TEST(GainController2, CheckAdaptiveDigitalVadProbabilityAttackConfig) { - AudioProcessing::Config::GainController2 config; - // Reject invalid attack.
- config.adaptive_digital.vad_probability_attack = -123.f; - EXPECT_FALSE(GainController2::Validate(config)); - config.adaptive_digital.vad_probability_attack = 0.f; +TEST(GainController2, CheckHeadroomDb) { + Agc2Config config; + config.adaptive_digital.headroom_db = -1.0f; EXPECT_FALSE(GainController2::Validate(config)); - config.adaptive_digital.vad_probability_attack = 42.f; - EXPECT_FALSE(GainController2::Validate(config)); - // Accept valid attack. - config.adaptive_digital.vad_probability_attack = 0.1f; + config.adaptive_digital.headroom_db = 0.0f; EXPECT_TRUE(GainController2::Validate(config)); - config.adaptive_digital.vad_probability_attack = 1.f; + config.adaptive_digital.headroom_db = 5.0f; EXPECT_TRUE(GainController2::Validate(config)); } -TEST(GainController2, - CheckAdaptiveDigitalLevelEstimatorSpeechFramesThresholdConfig) { - AudioProcessing::Config::GainController2 config; - config.adaptive_digital.level_estimator_adjacent_speech_frames_threshold = 0; +TEST(GainController2, CheckMaxGainDb) { + Agc2Config config; + config.adaptive_digital.max_gain_db = -1.0f; EXPECT_FALSE(GainController2::Validate(config)); - config.adaptive_digital.level_estimator_adjacent_speech_frames_threshold = 1; - EXPECT_TRUE(GainController2::Validate(config)); - config.adaptive_digital.level_estimator_adjacent_speech_frames_threshold = 7; - EXPECT_TRUE(GainController2::Validate(config)); -} - -TEST(GainController2, CheckAdaptiveDigitalInitialSaturationMarginConfig) { - AudioProcessing::Config::GainController2 config; - config.adaptive_digital.initial_saturation_margin_db = -1.f; + config.adaptive_digital.max_gain_db = 0.0f; EXPECT_FALSE(GainController2::Validate(config)); - config.adaptive_digital.initial_saturation_margin_db = 0.f; - EXPECT_TRUE(GainController2::Validate(config)); - config.adaptive_digital.initial_saturation_margin_db = 50.f; + config.adaptive_digital.max_gain_db = 5.0f; EXPECT_TRUE(GainController2::Validate(config)); } -TEST(GainController2, CheckAdaptiveDigitalExtraSaturationMarginConfig) { - AudioProcessing::Config::GainController2 config; - config.adaptive_digital.extra_saturation_margin_db = -1.f; +TEST(GainController2, CheckInitialGainDb) { + Agc2Config config; + config.adaptive_digital.initial_gain_db = -1.0f; EXPECT_FALSE(GainController2::Validate(config)); - config.adaptive_digital.extra_saturation_margin_db = 0.f; + config.adaptive_digital.initial_gain_db = 0.0f; EXPECT_TRUE(GainController2::Validate(config)); - config.adaptive_digital.extra_saturation_margin_db = 50.f; - EXPECT_TRUE(GainController2::Validate(config)); -} - -TEST(GainController2, - CheckAdaptiveDigitalGainApplierSpeechFramesThresholdConfig) { - AudioProcessing::Config::GainController2 config; - config.adaptive_digital.gain_applier_adjacent_speech_frames_threshold = 0; - EXPECT_FALSE(GainController2::Validate(config)); - config.adaptive_digital.gain_applier_adjacent_speech_frames_threshold = 1; - EXPECT_TRUE(GainController2::Validate(config)); - config.adaptive_digital.gain_applier_adjacent_speech_frames_threshold = 7; + config.adaptive_digital.initial_gain_db = 5.0f; EXPECT_TRUE(GainController2::Validate(config)); } TEST(GainController2, CheckAdaptiveDigitalMaxGainChangeSpeedConfig) { - AudioProcessing::Config::GainController2 config; - config.adaptive_digital.max_gain_change_db_per_second = -1.f; + Agc2Config config; + config.adaptive_digital.max_gain_change_db_per_second = -1.0f; EXPECT_FALSE(GainController2::Validate(config)); - config.adaptive_digital.max_gain_change_db_per_second = 0.f; + 
config.adaptive_digital.max_gain_change_db_per_second = 0.0f; EXPECT_FALSE(GainController2::Validate(config)); - config.adaptive_digital.max_gain_change_db_per_second = 5.f; + config.adaptive_digital.max_gain_change_db_per_second = 5.0f; EXPECT_TRUE(GainController2::Validate(config)); } TEST(GainController2, CheckAdaptiveDigitalMaxOutputNoiseLevelConfig) { - AudioProcessing::Config::GainController2 config; - config.adaptive_digital.max_output_noise_level_dbfs = 5.f; + Agc2Config config; + config.adaptive_digital.max_output_noise_level_dbfs = 5.0f; EXPECT_FALSE(GainController2::Validate(config)); - config.adaptive_digital.max_output_noise_level_dbfs = 0.f; + config.adaptive_digital.max_output_noise_level_dbfs = 0.0f; EXPECT_TRUE(GainController2::Validate(config)); - config.adaptive_digital.max_output_noise_level_dbfs = -5.f; + config.adaptive_digital.max_output_noise_level_dbfs = -5.0f; EXPECT_TRUE(GainController2::Validate(config)); } // Checks that the default config is applied. TEST(GainController2, ApplyDefaultConfig) { - auto gain_controller2 = std::make_unique<GainController2>(); - AudioProcessing::Config::GainController2 config; - gain_controller2->ApplyConfig(config); + auto gain_controller2 = std::make_unique<GainController2>( + Agc2Config{}, /*sample_rate_hz=*/16000, /*num_channels=*/2); + EXPECT_TRUE(gain_controller2.get()); } TEST(GainController2FixedDigital, GainShouldChangeOnSetGain) { - constexpr float kInputLevel = 1000.f; + constexpr float kInputLevel = 1000.0f; constexpr size_t kNumFrames = 5; constexpr size_t kSampleRateHz = 8000; - constexpr float kGain0Db = 0.f; - constexpr float kGain20Db = 20.f; + constexpr float kGain0Db = 0.0f; + constexpr float kGain20Db = 20.0f; auto agc2_fixed = CreateAgc2FixedDigitalMode(kGain0Db, kSampleRateHz); // Signal level is unchanged with 0 db gain. - EXPECT_FLOAT_EQ(RunAgc2WithConstantInput(agc2_fixed.get(), kInputLevel, - kNumFrames, kSampleRateHz), + EXPECT_FLOAT_EQ(RunAgc2WithConstantInput(*agc2_fixed, kInputLevel, kNumFrames, + kSampleRateHz), kInputLevel); // +20 db should increase signal by a factor of 10. - agc2_fixed->ApplyConfig(CreateAgc2FixedDigitalModeConfig(kGain20Db)); - EXPECT_FLOAT_EQ(RunAgc2WithConstantInput(agc2_fixed.get(), kInputLevel, - kNumFrames, kSampleRateHz), + agc2_fixed->SetFixedGainDb(kGain20Db); + EXPECT_FLOAT_EQ(RunAgc2WithConstantInput(*agc2_fixed, kInputLevel, kNumFrames, + kSampleRateHz), kInputLevel * 10); } @@ -240,67 +168,51 @@ TEST(GainController2FixedDigital, ChangeFixedGainShouldBeFastAndTimeInvariant) { // input signal when the gain changes. constexpr size_t kNumFrames = 5; - constexpr float kInputLevel = 1000.f; + constexpr float kInputLevel = 1000.0f; constexpr size_t kSampleRateHz = 8000; - constexpr float kGainDbLow = 0.f; - constexpr float kGainDbHigh = 25.f; + constexpr float kGainDbLow = 0.0f; + constexpr float kGainDbHigh = 25.0f; static_assert(kGainDbLow < kGainDbHigh, ""); auto agc2_fixed = CreateAgc2FixedDigitalMode(kGainDbLow, kSampleRateHz); // Start with a lower gain. const float output_level_pre = RunAgc2WithConstantInput( - agc2_fixed.get(), kInputLevel, kNumFrames, kSampleRateHz); + *agc2_fixed, kInputLevel, kNumFrames, kSampleRateHz); // Increase gain. - agc2_fixed->ApplyConfig(CreateAgc2FixedDigitalModeConfig(kGainDbHigh)); - static_cast<void>(RunAgc2WithConstantInput(agc2_fixed.get(), kInputLevel, + agc2_fixed->SetFixedGainDb(kGainDbHigh); + static_cast<void>(RunAgc2WithConstantInput(*agc2_fixed, kInputLevel, kNumFrames, kSampleRateHz)); // Back to the lower gain.
- agc2_fixed->ApplyConfig(CreateAgc2FixedDigitalModeConfig(kGainDbLow)); + agc2_fixed->SetFixedGainDb(kGainDbLow); const float output_level_post = RunAgc2WithConstantInput( - agc2_fixed.get(), kInputLevel, kNumFrames, kSampleRateHz); + *agc2_fixed, kInputLevel, kNumFrames, kSampleRateHz); EXPECT_EQ(output_level_pre, output_level_post); } -struct FixedDigitalTestParams { - FixedDigitalTestParams(float gain_db_min, - float gain_db_max, - size_t sample_rate, - bool saturation_expected) - : gain_db_min(gain_db_min), - gain_db_max(gain_db_max), - sample_rate(sample_rate), - saturation_expected(saturation_expected) {} - float gain_db_min; - float gain_db_max; - size_t sample_rate; - bool saturation_expected; -}; - class FixedDigitalTest - : public ::testing::Test, - public ::testing::WithParamInterface<FixedDigitalTestParams> {}; + : public ::testing::TestWithParam<std::tuple<float, float, int, bool>> { + protected: + float gain_db_min() const { return std::get<0>(GetParam()); } + float gain_db_max() const { return std::get<1>(GetParam()); } + int sample_rate_hz() const { return std::get<2>(GetParam()); } + bool saturation_expected() const { return std::get<3>(GetParam()); } +}; TEST_P(FixedDigitalTest, CheckSaturationBehaviorWithLimiter) { - const float kInputLevel = 32767.f; - const size_t kNumFrames = 5; - - const auto params = GetParam(); - - const auto gains_db = - test::LinSpace(params.gain_db_min, params.gain_db_max, 10); - for (const auto gain_db : gains_db) { - SCOPED_TRACE(std::to_string(gain_db)); - auto agc2_fixed = CreateAgc2FixedDigitalMode(gain_db, params.sample_rate); - const float processed_sample = RunAgc2WithConstantInput( - agc2_fixed.get(), kInputLevel, kNumFrames, params.sample_rate); - if (params.saturation_expected) { - EXPECT_FLOAT_EQ(processed_sample, 32767.f); + for (const float gain_db : test::LinSpace(gain_db_min(), gain_db_max(), 10)) { + SCOPED_TRACE(gain_db); + auto agc2_fixed = CreateAgc2FixedDigitalMode(gain_db, sample_rate_hz()); + const float processed_sample = + RunAgc2WithConstantInput(*agc2_fixed, /*input_level=*/32767.0f, + /*num_frames=*/5, sample_rate_hz()); + if (saturation_expected()) { + EXPECT_FLOAT_EQ(processed_sample, 32767.0f); } else { - EXPECT_LT(processed_sample, 32767.f); + EXPECT_LT(processed_sample, 32767.0f); } } } @@ -310,42 +222,76 @@ INSTANTIATE_TEST_SUITE_P( GainController2, FixedDigitalTest, ::testing::Values( - // When gain < |test::kLimiterMaxInputLevelDbFs|, the limiter will not + // When gain < `test::kLimiterMaxInputLevelDbFs`, the limiter will not // saturate the signal (at any sample rate). - FixedDigitalTestParams(0.1f, - test::kLimiterMaxInputLevelDbFs - 0.01f, - 8000, - false), - FixedDigitalTestParams(0.1, - test::kLimiterMaxInputLevelDbFs - 0.01f, - 48000, - false), - // When gain > |test::kLimiterMaxInputLevelDbFs|, the limiter will + std::make_tuple(0.1f, + test::kLimiterMaxInputLevelDbFs - 0.01f, + 8000, + false), + std::make_tuple(0.1, + test::kLimiterMaxInputLevelDbFs - 0.01f, + 48000, + false), + // When gain > `test::kLimiterMaxInputLevelDbFs`, the limiter will // saturate the signal (at any sample rate). - FixedDigitalTestParams(test::kLimiterMaxInputLevelDbFs + 0.01f, - 10.f, - 8000, - true), - FixedDigitalTestParams(test::kLimiterMaxInputLevelDbFs + 0.01f, - 10.f, - 48000, - true))); - -// Checks that the gain applied at the end of a PCM samples file is close to the -// expected value.
-TEST(GainController2, CheckGainAdaptiveDigital) { - constexpr float kExpectedGainDb = 4.3f; - constexpr float kToleranceDb = 0.5f; - GainController2 gain_controller2; - gain_controller2.Initialize(AudioProcessing::kSampleRate48kHz, - /*num_channels=*/1); - AudioProcessing::Config::GainController2 config; + std::make_tuple(test::kLimiterMaxInputLevelDbFs + 0.01f, + 10.0f, + 8000, + true), + std::make_tuple(test::kLimiterMaxInputLevelDbFs + 0.01f, + 10.0f, + 48000, + true))); + +// Processes a test audio file and checks that the gain applied at the end of +// the recording is close to the expected value. +TEST(GainController2, CheckFinalGainWithAdaptiveDigitalController) { + constexpr int kSampleRateHz = AudioProcessing::kSampleRate48kHz; + constexpr int kStereo = 2; + + // Create AGC2 enabling only the adaptive digital controller. + Agc2Config config; config.fixed_digital.gain_db = 0.0f; config.adaptive_digital.enabled = true; - gain_controller2.ApplyConfig(config); - EXPECT_NEAR( - GainDbAfterProcessingFile(gain_controller2, /*max_duration_ms=*/2000), - kExpectedGainDb, kToleranceDb); + GainController2 agc2(config, kSampleRateHz, kStereo); + + test::InputAudioFile input_file( + test::GetApmCaptureTestVectorFileName(kSampleRateHz), + /*loop_at_end=*/true); + const StreamConfig stream_config(kSampleRateHz, kStereo); + + // Init buffers. + constexpr int kFrameDurationMs = 10; + std::vector<float> frame(kStereo * stream_config.num_frames()); + AudioBuffer audio_buffer(kSampleRateHz, kStereo, kSampleRateHz, kStereo, + kSampleRateHz, kStereo); + + // Simulate. + constexpr float kGainDb = -6.0f; + const float gain = std::pow(10.0f, kGainDb / 20.0f); + constexpr int kDurationMs = 10000; + constexpr int kNumFramesToProcess = kDurationMs / kFrameDurationMs; + for (int i = 0; i < kNumFramesToProcess; ++i) { + ReadFloatSamplesFromStereoFile(stream_config.num_frames(), + stream_config.num_channels(), &input_file, + frame); + // Apply a fixed gain to the input audio. + for (float& x : frame) + x *= gain; + test::CopyVectorToAudioBuffer(stream_config, frame, &audio_buffer); + // Process. + agc2.Process(&audio_buffer); + } + + // Estimate the applied gain by processing a probing frame.
+ SetAudioBufferSamples(/*value=*/1.0f, audio_buffer); + agc2.Process(&audio_buffer); + const float applied_gain_db = + 20.0f * std::log10(audio_buffer.channels_const()[0][0]); + + constexpr float kExpectedGainDb = 5.6f; + constexpr float kToleranceDb = 0.3f; + EXPECT_NEAR(applied_gain_db, kExpectedGainDb, kToleranceDb); } } // namespace test
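The new test estimates the gain applied by the adaptive controller with a probing trick: after processing the recording, it pushes a frame of unity-amplitude samples through the controller, so the first output sample equals the linear gain and `20 * log10()` recovers the gain in dB. A sketch of that relation (the helper name is invented for illustration):

```cpp
#include <cmath>

// For a unity-amplitude probing input, the output sample *is* the linear
// gain applied by the controller.
float EstimateAppliedGainDb(float output_sample_for_unity_input) {
  return 20.0f * std::log10(output_sample_for_unity_input);
}
```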
diff --git a/modules/audio_processing/high_pass_filter.cc b/modules/audio_processing/high_pass_filter.cc index bff7209e96..3b4740f6a5 100644 --- a/modules/audio_processing/high_pass_filter.cc +++ b/modules/audio_processing/high_pass_filter.cc @@ -44,9 +44,9 @@ const CascadedBiQuadFilter::BiQuadCoefficients& ChooseCoefficients( case 48000: return kHighPassFilterCoefficients48kHz; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return kHighPassFilterCoefficients16kHz; } diff --git a/modules/audio_processing/high_pass_filter_unittest.cc b/modules/audio_processing/high_pass_filter_unittest.cc index f8e7226b6c..9f3c8fe595 100644 --- a/modules/audio_processing/high_pass_filter_unittest.cc +++ b/modules/audio_processing/high_pass_filter_unittest.cc @@ -75,7 +75,7 @@ void RunBitexactnessTest(int num_channels, bool use_audio_buffer_interface, const std::vector<float>& input, const std::vector<float>& reference) { - const StreamConfig stream_config(16000, num_channels, false); + const StreamConfig stream_config(16000, num_channels); HighPassFilter high_pass_filter(16000, num_channels); std::vector<float> output; @@ -131,8 +131,8 @@ std::vector<float> CreateVector(const rtc::ArrayView<const float>& array_view) { } // namespace TEST(HighPassFilterAccuracyTest, ResetWithAudioBufferInterface) { - const StreamConfig stream_config_stereo(16000, 2, false); - const StreamConfig stream_config_mono(16000, 1, false); + const StreamConfig stream_config_stereo(16000, 2); + const StreamConfig stream_config_mono(16000, 1); std::vector<float> x_mono(160, 1.f); std::vector<float> x_stereo(320, 1.f); HighPassFilter hpf(16000, 1); @@ -147,8 +147,8 @@ TEST(HighPassFilterAccuracyTest, ResetWithVectorInterface) { - const StreamConfig stream_config_stereo(16000, 2, false); - const StreamConfig stream_config_mono(16000, 1, false); + const StreamConfig stream_config_stereo(16000, 2); + const StreamConfig stream_config_mono(16000, 1); std::vector<float> x_mono(160, 1.f); std::vector<float> x_stereo(320, 1.f); HighPassFilter hpf(16000, 1); diff --git a/modules/audio_processing/include/audio_frame_proxies.cc b/modules/audio_processing/include/audio_frame_proxies.cc index b960e72e86..7cc4fb75e4 100644 --- a/modules/audio_processing/include/audio_frame_proxies.cc +++ b/modules/audio_processing/include/audio_frame_proxies.cc @@ -20,10 +20,8 @@ int ProcessAudioFrame(AudioProcessing* ap, AudioFrame* frame) { return AudioProcessing::Error::kNullPointerError; } - StreamConfig input_config(frame->sample_rate_hz_, frame->num_channels_, - /*has_keyboard=*/false); - StreamConfig output_config(frame->sample_rate_hz_, frame->num_channels_, - /*has_keyboard=*/false); + StreamConfig input_config(frame->sample_rate_hz_, frame->num_channels_); + StreamConfig output_config(frame->sample_rate_hz_, frame->num_channels_); RTC_DCHECK_EQ(frame->samples_per_channel(), input_config.num_frames()); int result = ap->ProcessStream(frame->data(), input_config, output_config, @@ -57,10 +55,8 @@ int ProcessReverseAudioFrame(AudioProcessing* ap, AudioFrame* frame) { return AudioProcessing::Error::kBadNumberChannelsError; } - StreamConfig input_config(frame->sample_rate_hz_, frame->num_channels_, - /*has_keyboard=*/false); - StreamConfig output_config(frame->sample_rate_hz_, frame->num_channels_, - /*has_keyboard=*/false); + StreamConfig input_config(frame->sample_rate_hz_, frame->num_channels_); + StreamConfig output_config(frame->sample_rate_hz_, frame->num_channels_); int result = ap->ProcessReverseStream(frame->data(), input_config, output_config, frame->mutable_data()); diff --git a/modules/audio_processing/include/audio_frame_proxies.h b/modules/audio_processing/include/audio_frame_proxies.h index 2d0f5b52e2..5dd111ca2b 100644 --- a/modules/audio_processing/include/audio_frame_proxies.h +++ b/modules/audio_processing/include/audio_frame_proxies.h @@ -16,21 +16,21 @@ namespace webrtc { class AudioFrame; class AudioProcessing; -// Processes a 10 ms |frame| of the primary audio stream using the provided +// Processes a 10 ms `frame` of the primary audio stream using the provided // AudioProcessing object. On the client-side, this is the near-end (or -// captured) audio. The |sample_rate_hz_|, |num_channels_|, and -// |samples_per_channel_| members of |frame| must be valid. If changed from the +// captured) audio. The `sample_rate_hz_`, `num_channels_`, and +// `samples_per_channel_` members of `frame` must be valid. If changed from the // previous call to this function, it will trigger an initialization of the // provided AudioProcessing object. // The function returns any error codes passed from the AudioProcessing // ProcessStream method. int ProcessAudioFrame(AudioProcessing* ap, AudioFrame* frame); -// Processes a 10 ms |frame| of the reverse direction audio stream using the +// Processes a 10 ms `frame` of the reverse direction audio stream using the // provided AudioProcessing object. The frame may be modified. On the // client-side, this is the far-end (or to be rendered) audio. The -// |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_| members of -// |frame| must be valid. If changed from the previous call to this function, it +// `sample_rate_hz_`, `num_channels_`, and `samples_per_channel_` members of +// `frame` must be valid. If changed from the previous call to this function, it // will trigger an initialization of the provided AudioProcessing object. // The function returns any error codes passed from the AudioProcessing // ProcessReverseStream method. diff --git a/modules/audio_processing/include/audio_frame_view.h b/modules/audio_processing/include/audio_frame_view.h index ab5779ad6b..164784a7cc 100644 --- a/modules/audio_processing/include/audio_frame_view.h +++ b/modules/audio_processing/include/audio_frame_view.h @@ -19,15 +19,16 @@ namespace webrtc { template <class T> class AudioFrameView { public: - // |num_channels| and |channel_size| describe the T** - // |audio_samples|. |audio_samples| is assumed to point to a + // `num_channels` and `channel_size` describe the T** + // `audio_samples`. `audio_samples` is assumed to point to a // two-dimensional |num_channels * channel_size| array of floats. - AudioFrameView(T* const* audio_samples, - size_t num_channels, - size_t channel_size) + AudioFrameView(T* const* audio_samples, int num_channels, int channel_size) : audio_samples_(audio_samples), num_channels_(num_channels), - channel_size_(channel_size) {} + channel_size_(channel_size) { + RTC_DCHECK_GE(num_channels_, 0); + RTC_DCHECK_GE(channel_size_, 0); + } // Implicit cast to allow converting Frame<float> to // Frame<const float>.
@@ -39,17 +40,17 @@ class AudioFrameView { AudioFrameView() = delete; - size_t num_channels() const { return num_channels_; } + int num_channels() const { return num_channels_; } - size_t samples_per_channel() const { return channel_size_; } + int samples_per_channel() const { return channel_size_; } - rtc::ArrayView<T> channel(size_t idx) { + rtc::ArrayView<T> channel(int idx) { RTC_DCHECK_LE(0, idx); RTC_DCHECK_LE(idx, num_channels_); return rtc::ArrayView<T>(audio_samples_[idx], channel_size_); } - rtc::ArrayView<const T> channel(size_t idx) const { + rtc::ArrayView<const T> channel(int idx) const { RTC_DCHECK_LE(0, idx); RTC_DCHECK_LE(idx, num_channels_); return rtc::ArrayView<const T>(audio_samples_[idx], channel_size_); } @@ -59,8 +60,8 @@ class AudioFrameView { private: T* const* audio_samples_; - size_t num_channels_; - size_t channel_size_; + int num_channels_; + int channel_size_; }; } // namespace webrtc
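`AudioFrameView` now uses `int` for channel and sample counts, with `RTC_DCHECK`s rejecting negative values. A sketch of wrapping a deinterleaved buffer in a view and scaling it in place (the buffer setup and function are invented for illustration):

```cpp
#include <vector>

#include "modules/audio_processing/include/audio_frame_view.h"

void AttenuateByHalf() {
  constexpr int kNumChannels = 2;
  constexpr int kSamplesPerChannel = 480;  // 10 ms at 48 kHz.
  std::vector<std::vector<float>> buffer(
      kNumChannels, std::vector<float>(kSamplesPerChannel, 1.0f));
  std::vector<float*> channel_ptrs;
  for (auto& channel : buffer) channel_ptrs.push_back(channel.data());

  webrtc::AudioFrameView<float> view(channel_ptrs.data(), kNumChannels,
                                     kSamplesPerChannel);
  for (int c = 0; c < view.num_channels(); ++c) {
    for (float& sample : view.channel(c)) sample *= 0.5f;  // About -6 dB.
  }
}
```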
diff --git a/modules/audio_processing/include/audio_processing.cc b/modules/audio_processing/include/audio_processing.cc index 44a90d6e76..86edaee087 100644 --- a/modules/audio_processing/include/audio_processing.cc +++ b/modules/audio_processing/include/audio_processing.cc @@ -46,17 +46,6 @@ std::string GainController1ModeToString(const Agc1Config::Mode& mode) { RTC_CHECK_NOTREACHED(); } -std::string GainController2NoiseEstimatorToString( - const Agc2Config::NoiseEstimator& type) { - switch (type) { - case Agc2Config::NoiseEstimator::kStationaryNoise: - return "StationaryNoise"; - case Agc2Config::NoiseEstimator::kNoiseFloor: - return "NoiseFloor"; - } - RTC_CHECK_NOTREACHED(); -} - } // namespace constexpr int AudioProcessing::kNativeSampleRatesHz[]; @@ -71,8 +60,6 @@ bool Agc1Config::operator==(const Agc1Config& rhs) const { target_level_dbfs == rhs.target_level_dbfs && compression_gain_db == rhs.compression_gain_db && enable_limiter == rhs.enable_limiter && - analog_level_minimum == rhs.analog_level_minimum && - analog_level_maximum == rhs.analog_level_maximum && analog_lhs.enabled == analog_rhs.enabled && analog_lhs.startup_min_volume == analog_rhs.startup_min_volume && analog_lhs.clipped_level_min == analog_rhs.clipped_level_min && @@ -93,20 +80,21 @@ bool Agc1Config::operator==(const Agc1Config& rhs) const { analog_lhs.clipping_predictor.clipping_threshold == analog_rhs.clipping_predictor.clipping_threshold && analog_lhs.clipping_predictor.crest_factor_margin == - analog_rhs.clipping_predictor.crest_factor_margin; + analog_rhs.clipping_predictor.crest_factor_margin && + analog_lhs.clipping_predictor.use_predicted_step == + analog_rhs.clipping_predictor.use_predicted_step; } bool Agc2Config::AdaptiveDigital::operator==( const Agc2Config::AdaptiveDigital& rhs) const { return enabled == rhs.enabled && dry_run == rhs.dry_run && - noise_estimator == rhs.noise_estimator && + headroom_db == rhs.headroom_db && max_gain_db == rhs.max_gain_db && + initial_gain_db == rhs.initial_gain_db && vad_reset_period_ms == rhs.vad_reset_period_ms && adjacent_speech_frames_threshold == rhs.adjacent_speech_frames_threshold && max_gain_change_db_per_second == rhs.max_gain_change_db_per_second && - max_output_noise_level_dbfs == rhs.max_output_noise_level_dbfs && - sse2_allowed == rhs.sse2_allowed && avx2_allowed == rhs.avx2_allowed && - neon_allowed == rhs.neon_allowed; + max_output_noise_level_dbfs == rhs.max_output_noise_level_dbfs; } bool Agc2Config::operator==(const Agc2Config& rhs) const { @@ -118,7 +106,7 @@ bool AudioProcessing::Config::CaptureLevelAdjustment::operator==( const AudioProcessing::Config::CaptureLevelAdjustment& rhs) const { return enabled == rhs.enabled && pre_gain_factor == rhs.pre_gain_factor && - post_gain_factor && rhs.post_gain_factor && + post_gain_factor == rhs.post_gain_factor && analog_mic_gain_emulation == rhs.analog_mic_gain_emulation; } @@ -131,97 +119,92 @@ operator==(const AudioProcessing::Config::CaptureLevelAdjustment:: std::string AudioProcessing::Config::ToString() const { char buf[2048]; rtc::SimpleStringBuilder builder(buf); - builder - << "AudioProcessing::Config{ " - "pipeline: { " - "maximum_internal_processing_rate: " - << pipeline.maximum_internal_processing_rate - << ", multi_channel_render: " << pipeline.multi_channel_render - << ", multi_channel_capture: " << pipeline.multi_channel_capture - << " }, pre_amplifier: { enabled: " << pre_amplifier.enabled - << ", fixed_gain_factor: " << pre_amplifier.fixed_gain_factor - << " },capture_level_adjustment: { enabled: " - << capture_level_adjustment.enabled - << ", pre_gain_factor: " << capture_level_adjustment.pre_gain_factor - << ", post_gain_factor: " << capture_level_adjustment.post_gain_factor - << ", analog_mic_gain_emulation: { enabled: " - << capture_level_adjustment.analog_mic_gain_emulation.enabled - << ", initial_level: " - << capture_level_adjustment.analog_mic_gain_emulation.initial_level - << " }}, high_pass_filter: { enabled: " << high_pass_filter.enabled - << " }, echo_canceller: { enabled: " << echo_canceller.enabled - << ", mobile_mode: " << echo_canceller.mobile_mode - << ", enforce_high_pass_filtering: " - << echo_canceller.enforce_high_pass_filtering - << " }, noise_suppression: { enabled: " << noise_suppression.enabled - << ", level: " << NoiseSuppressionLevelToString(noise_suppression.level) - << " }, transient_suppression: { enabled: " - << transient_suppression.enabled - << " }, voice_detection: { enabled: " << voice_detection.enabled - << " }, gain_controller1: { enabled: " << gain_controller1.enabled - << ", mode: " << GainController1ModeToString(gain_controller1.mode) - << ", target_level_dbfs: " << gain_controller1.target_level_dbfs - << ", compression_gain_db: " << gain_controller1.compression_gain_db - << ", enable_limiter: " << gain_controller1.enable_limiter - << ", analog_level_minimum: " << gain_controller1.analog_level_minimum - << ", analog_level_maximum: " << gain_controller1.analog_level_maximum - << ", analog_gain_controller { enabled: " - << gain_controller1.analog_gain_controller.enabled - << ", startup_min_volume: " - << gain_controller1.analog_gain_controller.startup_min_volume - << ", clipped_level_min: " - << gain_controller1.analog_gain_controller.clipped_level_min - << ", enable_digital_adaptive: " - << gain_controller1.analog_gain_controller.enable_digital_adaptive - << ", clipped_level_step: " - << gain_controller1.analog_gain_controller.clipped_level_step - << ", clipped_ratio_threshold: " - << gain_controller1.analog_gain_controller.clipped_ratio_threshold - << ", clipped_wait_frames: " - << gain_controller1.analog_gain_controller.clipped_wait_frames - << ", clipping_predictor: { enabled: " - << gain_controller1.analog_gain_controller.clipping_predictor.enabled - << ", mode: " - << gain_controller1.analog_gain_controller.clipping_predictor.mode - << ", window_length: " - << gain_controller1.analog_gain_controller.clipping_predictor - .window_length - << ", reference_window_length: " - << gain_controller1.analog_gain_controller.clipping_predictor -
.reference_window_length - << ", reference_window_delay: " - << gain_controller1.analog_gain_controller.clipping_predictor - .reference_window_delay - << ", clipping_threshold: " - << gain_controller1.analog_gain_controller.clipping_predictor - .clipping_threshold - << ", crest_factor_margin: " - << gain_controller1.analog_gain_controller.clipping_predictor - .crest_factor_margin - << " }}}, gain_controller2: { enabled: " << gain_controller2.enabled - << ", fixed_digital: { gain_db: " - << gain_controller2.fixed_digital.gain_db - << " }, adaptive_digital: { enabled: " - << gain_controller2.adaptive_digital.enabled - << ", dry_run: " << gain_controller2.adaptive_digital.dry_run - << ", noise_estimator: " - << GainController2NoiseEstimatorToString( - gain_controller2.adaptive_digital.noise_estimator) - << ", vad_reset_period_ms: " - << gain_controller2.adaptive_digital.vad_reset_period_ms - << ", adjacent_speech_frames_threshold: " - << gain_controller2.adaptive_digital.adjacent_speech_frames_threshold - << ", max_gain_change_db_per_second: " - << gain_controller2.adaptive_digital.max_gain_change_db_per_second - << ", max_output_noise_level_dbfs: " - << gain_controller2.adaptive_digital.max_output_noise_level_dbfs - << ", sse2_allowed: " << gain_controller2.adaptive_digital.sse2_allowed - << ", avx2_allowed: " << gain_controller2.adaptive_digital.avx2_allowed - << ", neon_allowed: " << gain_controller2.adaptive_digital.neon_allowed - << "}}, residual_echo_detector: { enabled: " - << residual_echo_detector.enabled - << " }, level_estimation: { enabled: " << level_estimation.enabled - << " }}"; + builder << "AudioProcessing::Config{ " + "pipeline: { " + "maximum_internal_processing_rate: " + << pipeline.maximum_internal_processing_rate + << ", multi_channel_render: " << pipeline.multi_channel_render + << ", multi_channel_capture: " << pipeline.multi_channel_capture + << " }, pre_amplifier: { enabled: " << pre_amplifier.enabled + << ", fixed_gain_factor: " << pre_amplifier.fixed_gain_factor + << " },capture_level_adjustment: { enabled: " + << capture_level_adjustment.enabled + << ", pre_gain_factor: " << capture_level_adjustment.pre_gain_factor + << ", post_gain_factor: " << capture_level_adjustment.post_gain_factor + << ", analog_mic_gain_emulation: { enabled: " + << capture_level_adjustment.analog_mic_gain_emulation.enabled + << ", initial_level: " + << capture_level_adjustment.analog_mic_gain_emulation.initial_level + << " }}, high_pass_filter: { enabled: " << high_pass_filter.enabled + << " }, echo_canceller: { enabled: " << echo_canceller.enabled + << ", mobile_mode: " << echo_canceller.mobile_mode + << ", enforce_high_pass_filtering: " + << echo_canceller.enforce_high_pass_filtering + << " }, noise_suppression: { enabled: " << noise_suppression.enabled + << ", level: " + << NoiseSuppressionLevelToString(noise_suppression.level) + << " }, transient_suppression: { enabled: " + << transient_suppression.enabled + << " }, gain_controller1: { enabled: " << gain_controller1.enabled + << ", mode: " << GainController1ModeToString(gain_controller1.mode) + << ", target_level_dbfs: " << gain_controller1.target_level_dbfs + << ", compression_gain_db: " << gain_controller1.compression_gain_db + << ", enable_limiter: " << gain_controller1.enable_limiter + << ", analog_gain_controller { enabled: " + << gain_controller1.analog_gain_controller.enabled + << ", startup_min_volume: " + << gain_controller1.analog_gain_controller.startup_min_volume + << ", clipped_level_min: " + << 
gain_controller1.analog_gain_controller.clipped_level_min + << ", enable_digital_adaptive: " + << gain_controller1.analog_gain_controller.enable_digital_adaptive + << ", clipped_level_step: " + << gain_controller1.analog_gain_controller.clipped_level_step + << ", clipped_ratio_threshold: " + << gain_controller1.analog_gain_controller.clipped_ratio_threshold + << ", clipped_wait_frames: " + << gain_controller1.analog_gain_controller.clipped_wait_frames + << ", clipping_predictor: { enabled: " + << gain_controller1.analog_gain_controller.clipping_predictor.enabled + << ", mode: " + << gain_controller1.analog_gain_controller.clipping_predictor.mode + << ", window_length: " + << gain_controller1.analog_gain_controller.clipping_predictor + .window_length + << ", reference_window_length: " + << gain_controller1.analog_gain_controller.clipping_predictor + .reference_window_length + << ", reference_window_delay: " + << gain_controller1.analog_gain_controller.clipping_predictor + .reference_window_delay + << ", clipping_threshold: " + << gain_controller1.analog_gain_controller.clipping_predictor + .clipping_threshold + << ", crest_factor_margin: " + << gain_controller1.analog_gain_controller.clipping_predictor + .crest_factor_margin + << ", use_predicted_step: " + << gain_controller1.analog_gain_controller.clipping_predictor + .use_predicted_step + << " }}}, gain_controller2: { enabled: " << gain_controller2.enabled + << ", fixed_digital: { gain_db: " + << gain_controller2.fixed_digital.gain_db + << " }, adaptive_digital: { enabled: " + << gain_controller2.adaptive_digital.enabled + << ", dry_run: " << gain_controller2.adaptive_digital.dry_run + << ", headroom_db: " << gain_controller2.adaptive_digital.headroom_db + << ", max_gain_db: " << gain_controller2.adaptive_digital.max_gain_db + << ", initial_gain_db: " + << gain_controller2.adaptive_digital.initial_gain_db + << ", vad_reset_period_ms: " + << gain_controller2.adaptive_digital.vad_reset_period_ms + << ", adjacent_speech_frames_threshold: " + << gain_controller2.adaptive_digital.adjacent_speech_frames_threshold + << ", max_gain_change_db_per_second: " + << gain_controller2.adaptive_digital.max_gain_change_db_per_second + << ", max_output_noise_level_dbfs: " + << gain_controller2.adaptive_digital.max_output_noise_level_dbfs + << "}}"; return builder.str(); } diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h index 64b1b5d107..9d6824c038 100644 --- a/modules/audio_processing/include/audio_processing.h +++ b/modules/audio_processing/include/audio_processing.h @@ -29,9 +29,7 @@ #include "api/audio/echo_control.h" #include "api/scoped_refptr.h" #include "modules/audio_processing/include/audio_processing_statistics.h" -#include "modules/audio_processing/include/config.h" #include "rtc_base/arraysize.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/ref_count.h" #include "rtc_base/system/file_wrapper.h" #include "rtc_base/system/rtc_export.h" @@ -53,7 +51,7 @@ class CustomAudioAnalyzer; class CustomProcessing; // Use to enable experimental gain control (AGC). At startup the experimental -// AGC moves the microphone volume up to |startup_min_volume| if the current +// AGC moves the microphone volume up to `startup_min_volume` if the current // microphone volume is set too low. The value is clamped to its operating range // [12, 255]. Here, 255 maps to 100%. 
// @@ -65,42 +63,13 @@ static constexpr int kAgcStartupMinVolume = 0; #endif // defined(WEBRTC_CHROMIUM_BUILD) static constexpr int kClippedLevelMin = 70; -// To be deprecated: Please instead use the flag in the -// AudioProcessing::Config::AnalogGainController. -// TODO(webrtc:5298): Remove. -struct ExperimentalAgc { - ExperimentalAgc() = default; - explicit ExperimentalAgc(bool enabled) : enabled(enabled) {} - ExperimentalAgc(bool enabled, int startup_min_volume) - : enabled(enabled), startup_min_volume(startup_min_volume) {} - static const ConfigOptionID identifier = ConfigOptionID::kExperimentalAgc; - bool enabled = true; - int startup_min_volume = kAgcStartupMinVolume; - // Lowest microphone level that will be applied in response to clipping. - int clipped_level_min = kClippedLevelMin; - bool digital_adaptive_disabled = false; -}; - -// To be deprecated: Please instead use the flag in the -// AudioProcessing::Config::TransientSuppression. -// -// Use to enable experimental noise suppression. It can be set in the -// constructor. -// TODO(webrtc:5298): Remove. -struct ExperimentalNs { - ExperimentalNs() : enabled(false) {} - explicit ExperimentalNs(bool enabled) : enabled(enabled) {} - static const ConfigOptionID identifier = ConfigOptionID::kExperimentalNs; - bool enabled; -}; - // The Audio Processing Module (APM) provides a collection of voice processing // components designed for real-time communications software. // // APM operates on two audio streams on a frame-by-frame basis. Frames of the // primary stream, on which all processing is applied, are passed to -// |ProcessStream()|. Frames of the reverse direction stream are passed to -// |ProcessReverseStream()|. On the client-side, this will typically be the +// `ProcessStream()`. Frames of the reverse direction stream are passed to +// `ProcessReverseStream()`. On the client-side, this will typically be the // near-end (capture) and far-end (render) streams, respectively. APM should be // placed in the signal chain as close to the audio hardware abstraction layer // (HAL) as possible. @@ -144,8 +113,6 @@ struct ExperimentalNs { // // config.high_pass_filter.enabled = true; // -// config.voice_detection.enabled = true; -// // apm->ApplyConfig(config) // // apm->noise_reduction()->set_level(kHighSuppression); @@ -191,7 +158,6 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { // submodule resets, affecting the audio quality. Use the RuntimeSetting // construct for runtime configuration. struct RTC_EXPORT Config { - // Sets the properties of the audio processing pipeline. struct RTC_EXPORT Pipeline { // Maximum allowed processing rate used internally. May only be set to @@ -264,11 +230,6 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { bool enabled = false; } transient_suppression; - // Enables reporting of |voice_detected| in webrtc::AudioProcessingStats. - struct VoiceDetection { - bool enabled = false; - } voice_detection; - // Enables automatic gain control (AGC) functionality. // The automatic gain control (AGC) component brings the signal to an // appropriate range. This is done by applying a digital gain directly and, @@ -321,10 +282,6 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { // target level. Otherwise, the signal will be compressed but not limited // above the target level. bool enable_limiter = true; - // Sets the minimum and maximum analog levels of the audio capture device. - // Must be set if an analog mode is used. Limited to [0, 65535]. 
- int analog_level_minimum = 0; - int analog_level_maximum = 255; // Enables the analog gain controller functionality. struct AnalogGainController { @@ -377,7 +334,7 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { // Enables the next generation AGC functionality. This feature replaces the // standard methods of gain control in the previous AGC. Enabling this // submodule enables an adaptive digital AGC followed by a limiter. By - // setting |fixed_gain_db|, the limiter can be turned into a compressor that + // setting `fixed_gain_db`, the limiter can be turned into a compressor that // first applies a fixed gain. The adaptive digital AGC can be turned off by // setting |adaptive_digital_mode=false|. struct RTC_EXPORT GainController2 { @@ -386,9 +343,6 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { return !(*this == rhs); } - // TODO(crbug.com/webrtc/7494): Remove `LevelEstimator`. - enum LevelEstimator { kRms, kPeak }; - enum NoiseEstimator { kStationaryNoise, kNoiseFloor }; bool enabled = false; struct FixedDigital { float gain_db = 0.0f; @@ -400,50 +354,30 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { } bool enabled = false; - // Run the adaptive digital controller but the signal is not modified. + // When true, the adaptive digital controller runs but the signal is not + // modified. bool dry_run = false; - NoiseEstimator noise_estimator = kNoiseFloor; + float headroom_db = 6.0f; + // TODO(bugs.webrtc.org/7494): Consider removing and inferring from + // `max_output_noise_level_dbfs`. + float max_gain_db = 30.0f; + float initial_gain_db = 8.0f; int vad_reset_period_ms = 1500; int adjacent_speech_frames_threshold = 12; float max_gain_change_db_per_second = 3.0f; float max_output_noise_level_dbfs = -50.0f; - bool sse2_allowed = true; - bool avx2_allowed = true; - bool neon_allowed = true; - // TODO(crbug.com/webrtc/7494): Remove deprecated settings below. - float vad_probability_attack = 1.0f; - LevelEstimator level_estimator = kRms; - int level_estimator_adjacent_speech_frames_threshold = 12; - bool use_saturation_protector = true; - float initial_saturation_margin_db = 25.0f; - float extra_saturation_margin_db = 5.0f; - int gain_applier_adjacent_speech_frames_threshold = 12; } adaptive_digital; } gain_controller2; + // TODO(bugs.webrtc.org/11539): Deprecated. Delete this flag. Replaced by + // injectable submodule. struct ResidualEchoDetector { - bool enabled = true; - } residual_echo_detector; - - // Enables reporting of |output_rms_dbfs| in webrtc::AudioProcessingStats. - struct LevelEstimation { bool enabled = false; - } level_estimation; + } residual_echo_detector; std::string ToString() const; }; - // TODO(mgraczyk): Remove once all methods that use ChannelLayout are gone. - enum ChannelLayout { - kMono, - // Left, right. - kStereo, - // Mono, keyboard, and mic. - kMonoAndKeyboard, - // Left, right, keyboard, and mic. - kStereoAndKeyboard - }; - // Specifies the properties of a setting to be passed to AudioProcessing at // runtime. class RuntimeSetting { @@ -501,7 +435,7 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { } // Creates a runtime setting to notify play-out (aka render) volume changes. 
- // |volume| is the unnormalized volume, the maximum of which + // `volume` is the unnormalized volume, the maximum of which static RuntimeSetting CreatePlayoutVolumeChange(int volume) { return {Type::kPlayoutVolumeChange, volume}; } @@ -562,32 +496,22 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { // // It is also not necessary to call if the audio parameters (sample // rate and number of channels) have changed. Passing updated parameters - // directly to |ProcessStream()| and |ProcessReverseStream()| is permissible. + // directly to `ProcessStream()` and `ProcessReverseStream()` is permissible. // If the parameters are known at init-time though, they may be provided. // TODO(webrtc:5298): Change to return void. virtual int Initialize() = 0; // The int16 interfaces require: - // - only |NativeRate|s be used + // - only `NativeRate`s be used // - that the input, output and reverse rates must match - // - that |processing_config.output_stream()| matches - // |processing_config.input_stream()|. + // - that `processing_config.output_stream()` matches + // `processing_config.input_stream()`. // // The float interfaces accept arbitrary rates and support differing input and // output layouts, but the output must have either one channel or the same // number of channels as the input. virtual int Initialize(const ProcessingConfig& processing_config) = 0; - // Initialize with unpacked parameters. See Initialize() above for details. - // - // TODO(mgraczyk): Remove once clients are updated to use the new interface. - virtual int Initialize(int capture_input_sample_rate_hz, - int capture_output_sample_rate_hz, - int render_sample_rate_hz, - ChannelLayout capture_input_layout, - ChannelLayout capture_output_layout, - ChannelLayout render_input_layout) = 0; - // TODO(peah): This method is a temporary solution used to take control // over the parameters in the audio processing module and is likely to change. virtual void ApplyConfig(const Config& config) = 0; @@ -616,7 +540,7 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { virtual bool PostRuntimeSetting(RuntimeSetting setting) = 0; // Accepts and produces a 10 ms frame interleaved 16 bit integer audio as - // specified in |input_config| and |output_config|. |src| and |dest| may use + // specified in `input_config` and `output_config`. `src` and `dest` may use // the same memory, if desired. virtual int ProcessStream(const int16_t* const src, const StreamConfig& input_config, @@ -624,35 +548,35 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { int16_t* const dest) = 0; // Accepts deinterleaved float audio with the range [-1, 1]. Each element of - // |src| points to a channel buffer, arranged according to |input_stream|. At - // output, the channels will be arranged according to |output_stream| in - // |dest|. + // `src` points to a channel buffer, arranged according to `input_stream`. At + // output, the channels will be arranged according to `output_stream` in + // `dest`. // - // The output must have one channel or as many channels as the input. |src| - // and |dest| may use the same memory, if desired. + // The output must have one channel or as many channels as the input. `src` + // and `dest` may use the same memory, if desired. 
virtual int ProcessStream(const float* const* src, const StreamConfig& input_config, const StreamConfig& output_config, float* const* dest) = 0; // Accepts and produces a 10 ms frame of interleaved 16 bit integer audio for - // the reverse direction audio stream as specified in |input_config| and - // |output_config|. |src| and |dest| may use the same memory, if desired. + // the reverse direction audio stream as specified in `input_config` and + // `output_config`. `src` and `dest` may use the same memory, if desired. virtual int ProcessReverseStream(const int16_t* const src, const StreamConfig& input_config, const StreamConfig& output_config, int16_t* const dest) = 0; // Accepts deinterleaved float audio with the range [-1, 1]. Each element of - // |data| points to a channel buffer, arranged according to |reverse_config|. + // `data` points to a channel buffer, arranged according to `reverse_config`. virtual int ProcessReverseStream(const float* const* src, const StreamConfig& input_config, const StreamConfig& output_config, float* const* dest) = 0; // Accepts deinterleaved float audio with the range [-1, 1]. Each element - // of |data| points to a channel buffer, arranged according to - // |reverse_config|. + // of `data` points to a channel buffer, arranged according to + // `reverse_config`. virtual int AnalyzeReverseStream(const float* const* data, const StreamConfig& reverse_config) = 0; @@ -665,7 +589,7 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { // This must be called prior to ProcessStream() if and only if adaptive analog // gain control is enabled, to pass the current analog level from the audio - // HAL. Must be within the range provided in Config::GainController1. + // HAL. Must be within the range [0, 255]. virtual void set_stream_analog_level(int level) = 0; // When an analog mode is set, this should be called after ProcessStream() @@ -675,7 +599,7 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { // This must be called if and only if echo processing is enabled. // - // Sets the |delay| in ms between ProcessReverseStream() receiving a far-end + // Sets the `delay` in ms between ProcessReverseStream() receiving a far-end // frame and ProcessStream() receiving a near-end frame containing the // corresponding echo. On the client-side this can be expressed as // delay = (t_render - t_analyze) + (t_process - t_capture) @@ -695,10 +619,10 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { // Creates and attaches an webrtc::AecDump for recording debugging // information. - // The |worker_queue| may not be null and must outlive the created + // The `worker_queue` may not be null and must outlive the created // AecDump instance. |max_log_size_bytes == -1| means the log size - // will be unlimited. |handle| may not be null. The AecDump takes - // responsibility for |handle| and closes it in the destructor. A + // will be unlimited. `handle` may not be null. The AecDump takes + // responsibility for `handle` and closes it in the destructor. A // return value of true indicates that the file has been // sucessfully opened, while a value of false indicates that // opening the file failed. @@ -726,7 +650,7 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface { // Get audio processing statistics. virtual AudioProcessingStats GetStatistics() = 0; - // TODO(webrtc:5298) Deprecated variant. The |has_remote_tracks| argument + // TODO(webrtc:5298) Deprecated variant. 
The `has_remote_tracks` argument // should be set if there are active remote tracks (this would usually be true // during a call). If there are no remote tracks some of the stats will not be // set by AudioProcessing, because they only make sense if there is at least @@ -782,71 +706,73 @@ class RTC_EXPORT AudioProcessingBuilder { public: AudioProcessingBuilder(); + AudioProcessingBuilder(const AudioProcessingBuilder&) = delete; + AudioProcessingBuilder& operator=(const AudioProcessingBuilder&) = delete; ~AudioProcessingBuilder(); - // The AudioProcessingBuilder takes ownership of the echo_control_factory. + + // Sets the APM configuration. + AudioProcessingBuilder& SetConfig(const AudioProcessing::Config& config) { + config_ = config; + return *this; + } + + // Sets the echo controller factory to inject when APM is created. AudioProcessingBuilder& SetEchoControlFactory( std::unique_ptr<EchoControlFactory> echo_control_factory) { echo_control_factory_ = std::move(echo_control_factory); return *this; } - // The AudioProcessingBuilder takes ownership of the capture_post_processing. + + // Sets the capture post-processing sub-module to inject when APM is created. AudioProcessingBuilder& SetCapturePostProcessing( std::unique_ptr<CustomProcessing> capture_post_processing) { capture_post_processing_ = std::move(capture_post_processing); return *this; } - // The AudioProcessingBuilder takes ownership of the render_pre_processing. + + // Sets the render pre-processing sub-module to inject when APM is created. AudioProcessingBuilder& SetRenderPreProcessing( std::unique_ptr<CustomProcessing> render_pre_processing) { render_pre_processing_ = std::move(render_pre_processing); return *this; } - // The AudioProcessingBuilder takes ownership of the echo_detector. + + // Sets the echo detector to inject when APM is created. AudioProcessingBuilder& SetEchoDetector( rtc::scoped_refptr<EchoDetector> echo_detector) { echo_detector_ = std::move(echo_detector); return *this; } - // The AudioProcessingBuilder takes ownership of the capture_analyzer. + + // Sets the capture analyzer sub-module to inject when APM is created. AudioProcessingBuilder& SetCaptureAnalyzer( std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) { capture_analyzer_ = std::move(capture_analyzer); return *this; } - // This creates an APM instance using the previously set components. Calling - // the Create function resets the AudioProcessingBuilder to its initial state. - AudioProcessing* Create(); - AudioProcessing* Create(const webrtc::Config& config); + + // Creates an APM instance with the specified config or the default one if + // unspecified. Injects the specified components transferring the ownership + // to the newly created APM instance - i.e., except for the config, the + // builder is reset to its initial state. + rtc::scoped_refptr<AudioProcessing> Create(); private: + AudioProcessing::Config config_; std::unique_ptr<EchoControlFactory> echo_control_factory_; std::unique_ptr<CustomProcessing> capture_post_processing_; std::unique_ptr<CustomProcessing> render_pre_processing_; rtc::scoped_refptr<EchoDetector> echo_detector_; std::unique_ptr<CustomAudioAnalyzer> capture_analyzer_; - RTC_DISALLOW_COPY_AND_ASSIGN(AudioProcessingBuilder); };
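The builder now owns the config (via `SetConfig()`) and `Create()` returns `rtc::scoped_refptr<AudioProcessing>` instead of a raw pointer. A sketch of the revised creation flow, also using the simplified two-argument `StreamConfig` from the next hunk (config values and the wrapper function are illustrative):

```cpp
#include "api/scoped_refptr.h"
#include "modules/audio_processing/include/audio_processing.h"

void SketchApmCreation(const float* const* src, float* const* dest) {
  webrtc::AudioProcessing::Config config;
  config.high_pass_filter.enabled = true;
  config.gain_controller2.enabled = true;

  rtc::scoped_refptr<webrtc::AudioProcessing> apm =
      webrtc::AudioProcessingBuilder().SetConfig(config).Create();

  // 10 ms of deinterleaved stereo float audio at 48 kHz; there is no
  // keyboard channel anymore.
  const webrtc::StreamConfig stream_config(48000, 2);
  apm->ProcessStream(src, stream_config, stream_config, dest);
}
```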
class StreamConfig { public: // sample_rate_hz: The sampling rate of the stream. - // - // num_channels: The number of audio channels in the stream, excluding the - // keyboard channel if it is present. When passing a - // StreamConfig with an array of arrays T*[N], - // - // N == {num_channels + 1 if has_keyboard - // {num_channels if !has_keyboard - // - // has_keyboard: True if the stream has a keyboard channel. When has_keyboard - // is true, the last channel in any corresponding list of - // channels is the keyboard channel. - StreamConfig(int sample_rate_hz = 0, - size_t num_channels = 0, - bool has_keyboard = false) + // num_channels: The number of audio channels in the stream. + StreamConfig(int sample_rate_hz = 0, size_t num_channels = 0) : sample_rate_hz_(sample_rate_hz), num_channels_(num_channels), - has_keyboard_(has_keyboard), num_frames_(calculate_frames(sample_rate_hz)) {} void set_sample_rate_hz(int value) { @@ -854,22 +780,18 @@ class StreamConfig { num_frames_ = calculate_frames(value); } void set_num_channels(size_t value) { num_channels_ = value; } - void set_has_keyboard(bool value) { has_keyboard_ = value; } int sample_rate_hz() const { return sample_rate_hz_; } - // The number of channels in the stream, not including the keyboard channel if - // present. + // The number of channels in the stream. size_t num_channels() const { return num_channels_; } - bool has_keyboard() const { return has_keyboard_; } size_t num_frames() const { return num_frames_; } size_t num_samples() const { return num_channels_ * num_frames_; } bool operator==(const StreamConfig& other) const { return sample_rate_hz_ == other.sample_rate_hz_ && - num_channels_ == other.num_channels_ && - has_keyboard_ == other.has_keyboard_; + num_channels_ == other.num_channels_; } bool operator!=(const StreamConfig& other) const { return !(*this == other); } @@ -882,7 +804,6 @@ class StreamConfig { int sample_rate_hz_; size_t num_channels_; - bool has_keyboard_; size_t num_frames_; }; @@ -972,17 +893,13 @@ class EchoDetector : public rtc::RefCountInterface { int render_sample_rate_hz, int num_render_channels) = 0; - // Analysis (not changing) of the render signal. + // Analysis (not changing) of the first channel of the render signal. virtual void AnalyzeRenderAudio(rtc::ArrayView<const float> render_audio) = 0; // Analysis (not changing) of the capture signal. virtual void AnalyzeCaptureAudio( rtc::ArrayView<const float> capture_audio) = 0; - // Pack an AudioBuffer into a vector<float>. - static void PackRenderAudioBuffer(AudioBuffer* audio, - std::vector<float>* packed_buffer); - struct Metrics { absl::optional<double> echo_likelihood; absl::optional<double> echo_likelihood_recent_max; diff --git a/modules/audio_processing/include/audio_processing_statistics.h b/modules/audio_processing/include/audio_processing_statistics.h index 87babee241..3b43319951 100644 --- a/modules/audio_processing/include/audio_processing_statistics.h +++ b/modules/audio_processing/include/audio_processing_statistics.h @@ -24,14 +24,8 @@ struct RTC_EXPORT AudioProcessingStats { AudioProcessingStats(const AudioProcessingStats& other); ~AudioProcessingStats(); - // The root mean square (RMS) level in dBFS (decibels from digital - // full-scale) of the last capture frame, after processing. It is - // constrained to [-127, 0]. - // The computation follows: https://tools.ietf.org/html/rfc6465 - // with the intent that it can provide the RTP audio level indication. - // Only reported if level estimation is enabled in AudioProcessing::Config. - absl::optional<int> output_rms_dbfs; - + // Deprecated. + // TODO(bugs.webrtc.org/11226): Remove. // True if voice is detected in the last capture frame, after processing. // It is conservative in flagging audio as speech, with low likelihood of // incorrectly flagging a frame as voice. @@ -50,9 +44,9 @@ struct RTC_EXPORT AudioProcessingStats { // The delay metrics consists of the delay median and standard deviation.
It // also consists of the fraction of delay estimates that can make the echo // cancellation perform poorly. The values are aggregated until the first - // call to |GetStatistics()| and afterwards aggregated and updated every + // call to `GetStatistics()` and afterwards aggregated and updated every // second. Note that if there are several clients pulling metrics from - // |GetStatistics()| during a session the first call from any of them will + // `GetStatistics()` during a session the first call from any of them will // change to one second aggregation window for all. absl::optional<int32_t> delay_median_ms; absl::optional<int32_t> delay_standard_deviation_ms; @@ -64,7 +58,7 @@ struct RTC_EXPORT AudioProcessingStats { // The instantaneous delay estimate produced in the AEC. The unit is in // milliseconds and the value is the instantaneous value at the time of the - // call to |GetStatistics()|. + // call to `GetStatistics()`. absl::optional<int32_t> delay_ms; }; diff --git a/modules/audio_processing/include/config.h b/modules/audio_processing/include/config.h deleted file mode 100644 index 7fab17831c..0000000000 --- a/modules/audio_processing/include/config.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_CONFIG_H_ -#define MODULES_AUDIO_PROCESSING_INCLUDE_CONFIG_H_ - -#include <map> - -#include "rtc_base/system/rtc_export.h" - -namespace webrtc { - -// Only add new values to the end of the enumeration and never remove (only -// deprecate) to maintain binary compatibility. -enum class ConfigOptionID { - kMyExperimentForTest, - kAlgo1CostFunctionForTest, - kTemporalLayersFactory, // Deprecated - kNetEqCapacityConfig, // Deprecated - kNetEqFastAccelerate, // Deprecated - kVoicePacing, // Deprecated - kExtendedFilter, // Deprecated - kDelayAgnostic, // Deprecated - kExperimentalAgc, - kExperimentalNs, - kBeamforming, // Deprecated - kIntelligibility, // Deprecated - kEchoCanceller3, // Deprecated - kAecRefinedAdaptiveFilter, // Deprecated - kLevelControl // Deprecated -}; - -// Class Config is designed to ease passing a set of options across webrtc code. -// Options are identified by typename in order to avoid incorrect casts. -// -// Usage: -// * declaring an option: -// struct Algo1_CostFunction { -// virtual float cost(int x) const { return x; } -// virtual ~Algo1_CostFunction() {} -// }; -// -// * accessing an option: -// config.Get<Algo1_CostFunction>().cost(value); -// -// * setting an option: -// struct SqrCost : Algo1_CostFunction { -// virtual float cost(int x) const { return x*x; } -// }; -// config.Set<Algo1_CostFunction>(new SqrCost()); -// -// Note: This class is thread-compatible (like STL containers). -class RTC_EXPORT Config { - public: - // Returns the option if set or a default constructed one. - // Callers that access options too often are encouraged to cache the result. - // Returned references are owned by this. - // - // Requires std::is_default_constructible<T> - template<typename T> - const T& Get() const; - - // Set the option, deleting any previous instance of the same. - // This instance gets ownership of the newly set value.
- template<typename T> - void Set(T* value); - - Config(); - ~Config(); - - private: - struct BaseOption { - virtual ~BaseOption() {} - }; - - template<typename T> - struct Option : BaseOption { - explicit Option(T* v) : value(v) {} - ~Option() { delete value; } - T* value; - }; - - template<typename T> - static ConfigOptionID identifier() { - return T::identifier; - } - - // Used to instantiate a default constructed object that doesn't need to be - // owned. This allows Get<T> to be implemented without requiring explicit - // locks. - template<typename T> - static const T& default_value() { - static const T* const def = new T(); - return *def; - } - - typedef std::map<ConfigOptionID, BaseOption*> OptionMap; - OptionMap options_; - - Config(const Config&); - void operator=(const Config&); -}; - -template<typename T> -const T& Config::Get() const { - OptionMap::const_iterator it = options_.find(identifier<T>()); - if (it != options_.end()) { - const T* t = static_cast<Option<T>*>(it->second)->value; - if (t) { - return *t; - } - } - return default_value<T>(); -} - -template<typename T> -void Config::Set(T* value) { - BaseOption*& it = options_[identifier<T>()]; - delete it; - it = new Option<T>(value); -} -} // namespace webrtc - -#endif // MODULES_AUDIO_PROCESSING_INCLUDE_CONFIG_H_ diff --git a/modules/audio_processing/include/mock_audio_processing.h b/modules/audio_processing/include/mock_audio_processing.h index 46c5f0efbe..f88094f5ea 100644 --- a/modules/audio_processing/include/mock_audio_processing.h +++ b/modules/audio_processing/include/mock_audio_processing.h @@ -67,6 +67,27 @@ class MockEchoControl : public EchoControl { MOCK_METHOD(bool, ActiveProcessing, (), (const, override)); }; +class MockEchoDetector : public EchoDetector { + public: + virtual ~MockEchoDetector() {} + MOCK_METHOD(void, + Initialize, + (int capture_sample_rate_hz, + int num_capture_channels, + int render_sample_rate_hz, + int num_render_channels), + (override)); + MOCK_METHOD(void, + AnalyzeRenderAudio, + (rtc::ArrayView<const float> render_audio), + (override)); + MOCK_METHOD(void, + AnalyzeCaptureAudio, + (rtc::ArrayView<const float> capture_audio), + (override)); + MOCK_METHOD(Metrics, GetMetrics, (), (const, override)); +}; + class MockAudioProcessing : public AudioProcessing { public: MockAudioProcessing() {} @@ -74,15 +95,6 @@ class MockAudioProcessing : public AudioProcessing { virtual ~MockAudioProcessing() {} MOCK_METHOD(int, Initialize, (), (override)); - MOCK_METHOD(int, - Initialize, - (int capture_input_sample_rate_hz, - int capture_output_sample_rate_hz, - int render_sample_rate_hz, - ChannelLayout capture_input_layout, - ChannelLayout capture_output_layout, - ChannelLayout render_input_layout), - (override)); MOCK_METHOD(int, Initialize, (const ProcessingConfig& processing_config), diff --git a/modules/audio_processing/level_estimator.cc b/modules/audio_processing/level_estimator.cc deleted file mode 100644 index e70728843a..0000000000 --- a/modules/audio_processing/level_estimator.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree.
- */ - -#include "modules/audio_processing/level_estimator.h" - -#include "api/array_view.h" - -namespace webrtc { - -LevelEstimator::LevelEstimator() { - rms_.Reset(); -} - -LevelEstimator::~LevelEstimator() = default; - -void LevelEstimator::ProcessStream(const AudioBuffer& audio) { - for (size_t i = 0; i < audio.num_channels(); i++) { - rms_.Analyze(rtc::ArrayView<const float>(audio.channels_const()[i], - audio.num_frames())); - } -} -} // namespace webrtc diff --git a/modules/audio_processing/level_estimator.h b/modules/audio_processing/level_estimator.h deleted file mode 100644 index 1d8a0712b5..0000000000 --- a/modules/audio_processing/level_estimator.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_H_ -#define MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_H_ - -#include "modules/audio_processing/audio_buffer.h" -#include "modules/audio_processing/rms_level.h" - -namespace webrtc { - -// An estimation component used to retrieve level metrics. -class LevelEstimator { - public: - LevelEstimator(); - ~LevelEstimator(); - - LevelEstimator(LevelEstimator&) = delete; - LevelEstimator& operator=(LevelEstimator&) = delete; - - void ProcessStream(const AudioBuffer& audio); - - // Returns the root mean square (RMS) level in dBFs (decibels from digital - // full-scale), or alternately dBov. It is computed over all primary stream - // frames since the last call to RMS(). The returned value is positive but - // should be interpreted as negative. It is constrained to [0, 127]. - // - // The computation follows: https://tools.ietf.org/html/rfc6465 - // with the intent that it can provide the RTP audio level indication. - // - // Frames passed to ProcessStream() with an |_energy| of zero are considered - // to have been muted. The RMS of the frame will be interpreted as -127. - int RMS() { return rms_.Average(); } - - private: - RmsLevel rms_; -}; -} // namespace webrtc - -#endif // MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_H_ diff --git a/modules/audio_processing/level_estimator_unittest.cc b/modules/audio_processing/level_estimator_unittest.cc deleted file mode 100644 index 7660b677fd..0000000000 --- a/modules/audio_processing/level_estimator_unittest.cc +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#include - -#include "api/array_view.h" -#include "modules/audio_processing/audio_buffer.h" -#include "modules/audio_processing/level_estimator.h" -#include "modules/audio_processing/test/audio_buffer_tools.h" -#include "modules/audio_processing/test/bitexactness_tools.h" -#include "test/gtest.h" - -namespace webrtc { -namespace { - -const int kNumFramesToProcess = 1000; - -// Processes a specified number of frames, verifies the results and reports -// any errors.
-void RunBitexactnessTest(int sample_rate_hz, - size_t num_channels, - int rms_reference) { - LevelEstimator level_estimator; - int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100); - StreamConfig capture_config(sample_rate_hz, num_channels, false); - AudioBuffer capture_buffer( - capture_config.sample_rate_hz(), capture_config.num_channels(), - capture_config.sample_rate_hz(), capture_config.num_channels(), - capture_config.sample_rate_hz(), capture_config.num_channels()); - - test::InputAudioFile capture_file( - test::GetApmCaptureTestVectorFileName(sample_rate_hz)); - std::vector<float> capture_input(samples_per_channel * num_channels); - for (size_t frame_no = 0; frame_no < kNumFramesToProcess; ++frame_no) { - ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels, - &capture_file, capture_input); - - test::CopyVectorToAudioBuffer(capture_config, capture_input, - &capture_buffer); - - level_estimator.ProcessStream(capture_buffer); - } - - // Extract test results. - int rms = level_estimator.RMS(); - - // Compare the output to the reference. - EXPECT_EQ(rms_reference, rms); -} - -} // namespace - -TEST(LevelEstimatorBitExactnessTest, Mono8kHz) { - const int kRmsReference = 31; - - RunBitexactnessTest(8000, 1, kRmsReference); -} - -TEST(LevelEstimatorBitExactnessTest, Mono16kHz) { - const int kRmsReference = 31; - - RunBitexactnessTest(16000, 1, kRmsReference); -} - -TEST(LevelEstimatorBitExactnessTest, Mono32kHz) { - const int kRmsReference = 31; - - RunBitexactnessTest(32000, 1, kRmsReference); -} - -TEST(LevelEstimatorBitExactnessTest, Mono48kHz) { - const int kRmsReference = 31; - - RunBitexactnessTest(48000, 1, kRmsReference); -} - -TEST(LevelEstimatorBitExactnessTest, Stereo16kHz) { - const int kRmsReference = 30; - - RunBitexactnessTest(16000, 2, kRmsReference); -} - -} // namespace webrtc diff --git a/modules/audio_processing/ns/suppression_params.cc b/modules/audio_processing/ns/suppression_params.cc index 9a6bd5a587..7bf18346f9 100644 --- a/modules/audio_processing/ns/suppression_params.cc +++ b/modules/audio_processing/ns/suppression_params.cc @@ -42,7 +42,7 @@ SuppressionParams::SuppressionParams( use_attenuation_adjustment = true; break; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } } diff --git a/modules/audio_processing/optionally_built_submodule_creators.h b/modules/audio_processing/optionally_built_submodule_creators.h index c96e66f975..7de337b277 100644 --- a/modules/audio_processing/optionally_built_submodule_creators.h +++ b/modules/audio_processing/optionally_built_submodule_creators.h @@ -20,7 +20,7 @@ namespace webrtc { // These overrides are only to be used for testing purposes. // Each flag emulates a preprocessor macro to exclude a submodule of APM from // the build, e.g. WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR. If the corresponding -// flag |transient_suppression| is enabled, then the creators will return +// flag `transient_suppression` is enabled, then the creators will return // nullptr instead of a submodule instance, as if the macro had been defined. struct ApmSubmoduleCreationOverrides { bool transient_suppression = false; @@ -29,7 +29,7 @@ // Creates a transient suppressor. // Will instead return nullptr if one of the following is true: // * WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR is defined -// * The corresponding override in |overrides| is enabled. +// * The corresponding override in `overrides` is enabled.
std::unique_ptr<TransientSuppressor> CreateTransientSuppressor( const ApmSubmoduleCreationOverrides& overrides); diff --git a/modules/audio_processing/residual_echo_detector.cc b/modules/audio_processing/residual_echo_detector.cc index 618888361f..fe1149a896 100644 --- a/modules/audio_processing/residual_echo_detector.cc +++ b/modules/audio_processing/residual_echo_detector.cc @@ -14,7 +14,6 @@ #include #include "absl/types/optional.h" -#include "modules/audio_processing/audio_buffer.h" #include "modules/audio_processing/logging/apm_data_dumper.h" #include "rtc_base/atomic_ops.h" #include "rtc_base/checks.h" @@ -199,13 +198,6 @@ void ResidualEchoDetector::Initialize(int /*capture_sample_rate_hz*/, reliability_ = 0.f; } -void EchoDetector::PackRenderAudioBuffer(AudioBuffer* audio, - std::vector<float>* packed_buffer) { - packed_buffer->clear(); - packed_buffer->insert(packed_buffer->end(), audio->channels()[0], - audio->channels()[0] + audio->num_frames()); -} - EchoDetector::Metrics ResidualEchoDetector::GetMetrics() const { EchoDetector::Metrics metrics; metrics.echo_likelihood = echo_likelihood_; diff --git a/modules/audio_processing/residual_echo_detector.h b/modules/audio_processing/residual_echo_detector.h index 5d18ecbb22..44252af655 100644 --- a/modules/audio_processing/residual_echo_detector.h +++ b/modules/audio_processing/residual_echo_detector.h @@ -51,12 +51,12 @@ class ResidualEchoDetector : public EchoDetector { private: static int instance_count_; std::unique_ptr<ApmDataDumper> data_dumper_; - // Keep track if the |Process| function has been previously called. + // Keep track if the `Process` function has been previously called. bool first_process_call_ = true; // Buffer for storing the power of incoming farend buffers. This is needed for // cases where calls to BufferFarend and Process are jittery. CircularBuffer render_buffer_; - // Count how long ago it was that the size of |render_buffer_| was zero. This + // Count how long ago it was that the size of `render_buffer_` was zero. This // value is also reset to zero when clock drift is detected and a value from // the renderbuffer is discarded, even though the buffer is not actually zero // at that point. This is done to avoid repeatedly removing elements in this diff --git a/modules/audio_processing/rms_level.h b/modules/audio_processing/rms_level.h index e1a6d565a9..4955d1b308 100644 --- a/modules/audio_processing/rms_level.h +++ b/modules/audio_processing/rms_level.h @@ -47,7 +47,7 @@ class RmsLevel { void Analyze(rtc::ArrayView<const int16_t> data); void Analyze(rtc::ArrayView<const float> data); - // If all samples with the given |length| have a magnitude of zero, this is + // a shortcut to avoid some computation. hmm - // If all samples with the given `length` have a magnitude of zero, this is + // a shortcut to avoid some computation. void AnalyzeMuted(size_t length);
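For orientation, a minimal sketch of how RmsLevel is typically driven; the frame size and contents are invented for illustration, and per the deleted level_estimator.h above the returned level lives in [0, 127] and is read as a negative dBFS value:

#include <array>
#include <cstdint>
#include "api/array_view.h"
#include "modules/audio_processing/rms_level.h"

void DemoRmsLevel() {
  webrtc::RmsLevel rms;
  std::array<int16_t, 160> frame{};  // One 10 ms frame at 16 kHz, all zeros.
  rms.AnalyzeMuted(frame.size());    // Shortcut for an all-zero frame.
  rms.Analyze(rtc::ArrayView<const int16_t>(frame.data(), frame.size()));
  const int level = rms.Average();   // In [0, 127]; interpret as -level dBFS.
}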
@@ -62,7 +62,7 @@ class RmsLevel { Levels AverageAndPeak(); private: - // Compares |block_size| with |block_size_|. If they are different, calls + // Compares `block_size` with `block_size_`. If they are different, calls // Reset() and stores the new size. void CheckBlockSize(size_t block_size); diff --git a/modules/audio_processing/test/aec_dump_based_simulator.cc b/modules/audio_processing/test/aec_dump_based_simulator.cc index 4703ee30c7..621c3a43d0 100644 --- a/modules/audio_processing/test/aec_dump_based_simulator.cc +++ b/modules/audio_processing/test/aec_dump_based_simulator.cc @@ -505,10 +505,6 @@ void AecDumpBasedSimulator::HandleMessage( << msg.experiments_description() << std::endl; } - if (settings_.use_ed) { - apm_config.residual_echo_detector.enabled = *settings_.use_ed; - } - ap_->ApplyConfig(apm_config); } } diff --git a/modules/audio_processing/test/audio_processing_builder_for_testing.cc b/modules/audio_processing/test/audio_processing_builder_for_testing.cc index faab18f7fd..72f75ed1e0 100644 --- a/modules/audio_processing/test/audio_processing_builder_for_testing.cc +++ b/modules/audio_processing/test/audio_processing_builder_for_testing.cc @@ -23,32 +23,19 @@ AudioProcessingBuilderForTesting::~AudioProcessingBuilderForTesting() = default; #ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE -AudioProcessing* AudioProcessingBuilderForTesting::Create() { - webrtc::Config config; - return Create(config); -} - -AudioProcessing* AudioProcessingBuilderForTesting::Create( - const webrtc::Config& config) { - return new rtc::RefCountedObject<AudioProcessingImpl>( - config, std::move(capture_post_processing_), +rtc::scoped_refptr<AudioProcessing> AudioProcessingBuilderForTesting::Create() { + return rtc::make_ref_counted<AudioProcessingImpl>( + config_, std::move(capture_post_processing_), std::move(render_pre_processing_), std::move(echo_control_factory_), std::move(echo_detector_), std::move(capture_analyzer_)); } #else -AudioProcessing* AudioProcessingBuilderForTesting::Create() { - AudioProcessingBuilder builder; - TransferOwnershipsToBuilder(&builder); - return builder.Create(); -} - -AudioProcessing* AudioProcessingBuilderForTesting::Create( - const webrtc::Config& config) { +rtc::scoped_refptr<AudioProcessing> AudioProcessingBuilderForTesting::Create() { AudioProcessingBuilder builder; TransferOwnershipsToBuilder(&builder); - return builder.Create(config); + return builder.SetConfig(config_).Create(); } #endif @@ -57,9 +44,9 @@ void AudioProcessingBuilderForTesting::TransferOwnershipsToBuilder( AudioProcessingBuilder* builder) { builder->SetCapturePostProcessing(std::move(capture_post_processing_)); builder->SetRenderPreProcessing(std::move(render_pre_processing_)); - builder->SetCaptureAnalyzer(std::move(capture_analyzer_)); builder->SetEchoControlFactory(std::move(echo_control_factory_)); builder->SetEchoDetector(std::move(echo_detector_)); + builder->SetCaptureAnalyzer(std::move(capture_analyzer_)); } } // namespace webrtc diff --git a/modules/audio_processing/test/audio_processing_builder_for_testing.h b/modules/audio_processing/test/audio_processing_builder_for_testing.h index a245450d45..e73706c1b6 100644 --- a/modules/audio_processing/test/audio_processing_builder_for_testing.h +++ b/modules/audio_processing/test/audio_processing_builder_for_testing.h @@ -24,51 +24,65 @@ namespace webrtc { class AudioProcessingBuilderForTesting { public: AudioProcessingBuilderForTesting(); + AudioProcessingBuilderForTesting(const AudioProcessingBuilderForTesting&) = + delete; + AudioProcessingBuilderForTesting& operator=( + const AudioProcessingBuilderForTesting&) = delete; ~AudioProcessingBuilderForTesting(); - // The AudioProcessingBuilderForTesting takes ownership of the - // echo_control_factory. + + // Sets the APM configuration.
+ AudioProcessingBuilderForTesting& SetConfig( + const AudioProcessing::Config& config) { + config_ = config; + return *this; + } + + // Sets the echo controller factory to inject when APM is created. AudioProcessingBuilderForTesting& SetEchoControlFactory( std::unique_ptr<EchoControlFactory> echo_control_factory) { echo_control_factory_ = std::move(echo_control_factory); return *this; } - // The AudioProcessingBuilderForTesting takes ownership of the - // capture_post_processing. + + // Sets the capture post-processing sub-module to inject when APM is created. AudioProcessingBuilderForTesting& SetCapturePostProcessing( std::unique_ptr<CustomProcessing> capture_post_processing) { capture_post_processing_ = std::move(capture_post_processing); return *this; } - // The AudioProcessingBuilderForTesting takes ownership of the - // render_pre_processing. + + // Sets the render pre-processing sub-module to inject when APM is created. AudioProcessingBuilderForTesting& SetRenderPreProcessing( std::unique_ptr<CustomProcessing> render_pre_processing) { render_pre_processing_ = std::move(render_pre_processing); return *this; } - // The AudioProcessingBuilderForTesting takes ownership of the echo_detector. + + // Sets the echo detector to inject when APM is created. AudioProcessingBuilderForTesting& SetEchoDetector( rtc::scoped_refptr<EchoDetector> echo_detector) { echo_detector_ = std::move(echo_detector); return *this; } - // The AudioProcessingBuilderForTesting takes ownership of the - // capture_analyzer. + + // Sets the capture analyzer sub-module to inject when APM is created. AudioProcessingBuilderForTesting& SetCaptureAnalyzer( std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) { capture_analyzer_ = std::move(capture_analyzer); return *this; } - // This creates an APM instance using the previously set components. Calling - // the Create function resets the AudioProcessingBuilderForTesting to its - // initial state. - AudioProcessing* Create(); - AudioProcessing* Create(const webrtc::Config& config); + + // Creates an APM instance with the specified config or the default one if + // unspecified. Injects the specified components, transferring their ownership + // to the newly created APM instance - i.e., except for the config, the + // builder is reset to its initial state. + rtc::scoped_refptr<AudioProcessing> Create(); private: // Transfers the ownership to a non-testing builder. void TransferOwnershipsToBuilder(AudioProcessingBuilder* builder); + AudioProcessing::Config config_; std::unique_ptr<EchoControlFactory> echo_control_factory_; std::unique_ptr<CustomProcessing> capture_post_processing_; std::unique_ptr<CustomProcessing> render_pre_processing_; diff --git a/modules/audio_processing/test/audio_processing_simulator.cc b/modules/audio_processing/test/audio_processing_simulator.cc index 1f05f43120..4915648fa9 100644 --- a/modules/audio_processing/test/audio_processing_simulator.cc +++ b/modules/audio_processing/test/audio_processing_simulator.cc @@ -20,6 +20,7 @@ #include "api/audio/echo_canceller3_config_json.h" #include "api/audio/echo_canceller3_factory.h" +#include "api/audio/echo_detector_creator.h" #include "modules/audio_processing/aec_dump/aec_dump_factory.h" #include "modules/audio_processing/echo_control_mobile_impl.h" #include "modules/audio_processing/include/audio_processing.h" @@ -188,6 +189,10 @@ AudioProcessingSimulator::AudioProcessingSimulator( builder->SetEchoControlFactory(std::move(echo_control_factory)); } + if (settings_.use_ed && *settings.use_ed) { + builder->SetEchoDetector(CreateEchoDetector()); + } +
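The removed residual_echo_detector config flag is replaced by explicit injection, as the hunk above shows; a minimal sketch of the same flow in client code, using the CreateEchoDetector() helper from the api/audio/echo_detector_creator.h include added above:

#include "api/audio/echo_detector_creator.h"
#include "modules/audio_processing/include/audio_processing.h"

rtc::scoped_refptr<webrtc::AudioProcessing> CreateApmWithEchoDetector() {
  return webrtc::AudioProcessingBuilder()
      .SetEchoDetector(webrtc::CreateEchoDetector())
      .Create();
}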
// Create an audio processing object. ap_ = builder->Create(); RTC_CHECK(ap_); @@ -206,7 +211,7 @@ void AudioProcessingSimulator::ProcessStream(bool fixed_interface) { if (settings_.simulate_mic_gain) { if (settings_.aec_dump_input_filename) { // When the analog gain is simulated and an AEC dump is used as input, set - // the undo level to |aec_dump_mic_level_| to virtually restore the + // the undo level to `aec_dump_mic_level_` to virtually restore the // unmodified microphone signal level. fake_recording_device_.SetUndoMicLevel(aec_dump_mic_level_); } @@ -261,7 +266,7 @@ void AudioProcessingSimulator::ProcessStream(bool fixed_interface) { // Store the mic level suggested by AGC. // Note that when the analog gain is simulated and an AEC dump is used as - // input, |analog_mic_level_| will not be used with set_stream_analog_level(). + // input, `analog_mic_level_` will not be used with set_stream_analog_level(). analog_mic_level_ = ap_->recommended_stream_analog_level(); if (settings_.simulate_mic_gain) { fake_recording_device_.SetMicLevel(analog_mic_level_); @@ -487,8 +492,6 @@ void AudioProcessingSimulator::ConfigureAudioProcessor() { if (settings_.agc2_use_adaptive_gain) { apm_config.gain_controller2.adaptive_digital.enabled = *settings_.agc2_use_adaptive_gain; - apm_config.gain_controller2.adaptive_digital.level_estimator = - settings_.agc2_adaptive_level_estimator; } } if (settings_.use_pre_amplifier) { @@ -540,14 +543,6 @@ void AudioProcessingSimulator::ConfigureAudioProcessor() { apm_config.high_pass_filter.enabled = *settings_.use_hpf; } - if (settings_.use_le) { - apm_config.level_estimation.enabled = *settings_.use_le; - } - - if (settings_.use_vad) { - apm_config.voice_detection.enabled = *settings_.use_vad; - } - if (settings_.use_agc) { apm_config.gain_controller1.enabled = *settings_.use_agc; } @@ -575,10 +570,6 @@ void AudioProcessingSimulator::ConfigureAudioProcessor() { *settings_.analog_agc_disable_digital_adaptive; } - if (settings_.use_ed) { - apm_config.residual_echo_detector.enabled = *settings_.use_ed; - } - if (settings_.maximum_internal_processing_rate) { apm_config.pipeline.maximum_internal_processing_rate = *settings_.maximum_internal_processing_rate; diff --git a/modules/audio_processing/test/audio_processing_simulator.h b/modules/audio_processing/test/audio_processing_simulator.h index 9539e58b1b..af76d7e1c9 100644 --- a/modules/audio_processing/test/audio_processing_simulator.h +++ b/modules/audio_processing/test/audio_processing_simulator.h @@ -38,7 +38,7 @@ struct Int16Frame { samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, kChunksPerSecond); this->num_channels = num_channels; - config = StreamConfig(sample_rate_hz, num_channels, /*has_keyboard=*/false); + config = StreamConfig(sample_rate_hz, num_channels); data.resize(num_channels * samples_per_channel); } @@ -105,8 +105,6 @@ struct SimulationSettings { absl::optional<bool> use_ns; absl::optional<bool> use_ts; absl::optional<bool> use_analog_agc; - absl::optional<bool> use_vad; - absl::optional<bool> use_le; absl::optional<bool> use_all; absl::optional<bool> analog_agc_disable_digital_adaptive; absl::optional<int> agc_mode; @@ -115,8 +113,6 @@ absl::optional<int> agc_compression_gain; absl::optional<bool> agc2_use_adaptive_gain; absl::optional<float> agc2_fixed_gain_db; - AudioProcessing::Config::GainController2::LevelEstimator - agc2_adaptive_level_estimator; absl::optional<float> pre_amplifier_gain_factor; absl::optional<float> pre_gain_factor; absl::optional<float> post_gain_factor; diff --git a/modules/audio_processing/test/audioproc_float_impl.cc
b/modules/audio_processing/test/audioproc_float_impl.cc index 1fc39bb6b9..aab1881913 100644 --- a/modules/audio_processing/test/audioproc_float_impl.cc +++ b/modules/audio_processing/test/audioproc_float_impl.cc @@ -117,14 +117,6 @@ ABSL_FLAG(int, analog_agc, kParameterNotSpecifiedValue, "Activate (1) or deactivate (0) the analog AGC"); -ABSL_FLAG(int, - vad, - kParameterNotSpecifiedValue, - "Activate (1) or deactivate (0) the voice activity detector"); -ABSL_FLAG(int, - le, - kParameterNotSpecifiedValue, - "Activate (1) or deactivate (0) the level estimator"); ABSL_FLAG(bool, all_default, false, @@ -159,10 +151,6 @@ ABSL_FLAG(float, agc2_fixed_gain_db, kParameterNotSpecifiedValue, "AGC2 fixed gain (dB) to apply"); -ABSL_FLAG(std::string, - agc2_adaptive_level_estimator, - "RMS", - "AGC2 adaptive digital level estimator to use [RMS, peak]"); ABSL_FLAG(float, pre_amplifier_gain_factor, kParameterNotSpecifiedValue, @@ -341,10 +329,6 @@ const char kUsageDescription[] = "processing module, either based on wav files or " "protobuf debug dump recordings.\n"; -std::vector<std::string> GetAgc2AdaptiveLevelEstimatorNames() { - return {"RMS", "peak"}; -} - void SetSettingIfSpecified(const std::string& value, absl::optional<std::string>* parameter) { if (value.compare("") != 0) { @@ -374,32 +358,9 @@ void SetSettingIfFlagSet(int32_t flag, absl::optional<bool>* parameter) { } } -AudioProcessing::Config::GainController2::LevelEstimator -MapAgc2AdaptiveLevelEstimator(absl::string_view name) { - if (name.compare("RMS") == 0) { - return AudioProcessing::Config::GainController2::LevelEstimator::kRms; - } - if (name.compare("peak") == 0) { - return AudioProcessing::Config::GainController2::LevelEstimator::kPeak; - } - auto concat_strings = - [](const std::vector<std::string>& strings) -> std::string { - rtc::StringBuilder ss; - for (const auto& s : strings) { - ss << " " << s; - } - return ss.Release(); - }; - RTC_CHECK(false) - << "Invalid value for agc2_adaptive_level_estimator, valid options:" - << concat_strings(GetAgc2AdaptiveLevelEstimatorNames()) << "."; -} - SimulationSettings CreateSettings() { SimulationSettings settings; if (absl::GetFlag(FLAGS_all_default)) { - settings.use_le = true; - settings.use_vad = true; settings.use_ts = true; settings.use_analog_agc = true; settings.use_ns = true; @@ -451,8 +412,6 @@ SimulationSettings CreateSettings() { SetSettingIfSpecified(absl::GetFlag(FLAGS_ts), &settings.use_ts); SetSettingIfFlagSet(absl::GetFlag(FLAGS_analog_agc), &settings.use_analog_agc); - SetSettingIfFlagSet(absl::GetFlag(FLAGS_vad), &settings.use_vad); - SetSettingIfFlagSet(absl::GetFlag(FLAGS_le), &settings.use_le); SetSettingIfFlagSet(absl::GetFlag(FLAGS_analog_agc_disable_digital_adaptive), &settings.analog_agc_disable_digital_adaptive); SetSettingIfSpecified(absl::GetFlag(FLAGS_agc_mode), &settings.agc_mode); @@ -467,8 +426,6 @@ SimulationSettings CreateSettings() { SetSettingIfSpecified(absl::GetFlag(FLAGS_agc2_fixed_gain_db), &settings.agc2_fixed_gain_db); - settings.agc2_adaptive_level_estimator = MapAgc2AdaptiveLevelEstimator( - absl::GetFlag(FLAGS_agc2_adaptive_level_estimator)); SetSettingIfSpecified(absl::GetFlag(FLAGS_pre_amplifier_gain_factor), &settings.pre_amplifier_gain_factor); SetSettingIfSpecified(absl::GetFlag(FLAGS_pre_gain_factor), diff --git a/modules/audio_processing/test/audioproc_float_impl.h b/modules/audio_processing/test/audioproc_float_impl.h index 0687c43a5d..5ed3aefab7 100644 --- a/modules/audio_processing/test/audioproc_float_impl.h +++ b/modules/audio_processing/test/audioproc_float_impl.h @@ -19,11
+19,11 @@ namespace webrtc { namespace test { // This function implements the audio processing simulation utility. Pass -// |input_aecdump| to provide the content of an AEC dump file as a string; if -// |input_aecdump| is not passed, a WAV or AEC input dump file must be specified -// via the |argv| argument. Pass |processed_capture_samples| to write in it the -// samples processed on the capture side; if |processed_capture_samples| is not -// passed, the output file can optionally be specified via the |argv| argument. +// `input_aecdump` to provide the content of an AEC dump file as a string; if +// `input_aecdump` is not passed, a WAV or AEC input dump file must be specified +// via the `argv` argument. Pass `processed_capture_samples` to write in it the +// samples processed on the capture side; if `processed_capture_samples` is not +// passed, the output file can optionally be specified via the `argv` argument. // Any audio_processing object specified in the input is used for the // simulation. Note that when the audio_processing object is specified all // functionality that relies on using the internal builder is deactivated, @@ -34,11 +34,11 @@ int AudioprocFloatImpl(rtc::scoped_refptr<AudioProcessing> audio_processing, int argc, char* argv[]); // This function implements the audio processing simulation utility. Pass -// |input_aecdump| to provide the content of an AEC dump file as a string; if -// |input_aecdump| is not passed, a WAV or AEC input dump file must be specified -// via the |argv| argument. Pass |processed_capture_samples| to write in it the -// samples processed on the capture side; if |processed_capture_samples| is not -// passed, the output file can optionally be specified via the |argv| argument. +// `input_aecdump` to provide the content of an AEC dump file as a string; if +// `input_aecdump` is not passed, a WAV or AEC input dump file must be specified +// via the `argv` argument. Pass `processed_capture_samples` to write in it the +// samples processed on the capture side; if `processed_capture_samples` is not +// passed, the output file can optionally be specified via the `argv` argument.
int AudioprocFloatImpl(std::unique_ptr<AudioProcessingBuilder> ap_builder, int argc, char* argv[], diff --git a/modules/audio_processing/test/bitexactness_tools.cc b/modules/audio_processing/test/bitexactness_tools.cc index f245c2cf19..0464345364 100644 --- a/modules/audio_processing/test/bitexactness_tools.cc +++ b/modules/audio_processing/test/bitexactness_tools.cc @@ -33,7 +33,7 @@ std::string GetApmRenderTestVectorFileName(int sample_rate_hz) { case 48000: return ResourcePath("far48_stereo", "pcm"); default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } return ""; } @@ -49,7 +49,7 @@ std::string GetApmCaptureTestVectorFileName(int sample_rate_hz) { case 48000: return ResourcePath("near48_stereo", "pcm"); default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); } return ""; } diff --git a/modules/audio_processing/test/conversational_speech/multiend_call.h b/modules/audio_processing/test/conversational_speech/multiend_call.h index 5b6300f0f1..693f00edd9 100644 --- a/modules/audio_processing/test/conversational_speech/multiend_call.h +++ b/modules/audio_processing/test/conversational_speech/multiend_call.h @@ -24,7 +24,6 @@ #include "modules/audio_processing/test/conversational_speech/timing.h" #include "modules/audio_processing/test/conversational_speech/wavreader_abstract_factory.h" #include "modules/audio_processing/test/conversational_speech/wavreader_interface.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { @@ -57,6 +56,9 @@ class MultiEndCall { std::unique_ptr<WavReaderAbstractFactory> wavreader_abstract_factory); ~MultiEndCall(); + MultiEndCall(const MultiEndCall&) = delete; + MultiEndCall& operator=(const MultiEndCall&) = delete; + const std::set<std::string>& speaker_names() const { return speaker_names_; } const std::map<std::string, std::unique_ptr<WavReaderInterface>>& audiotrack_readers() const { @@ -92,8 +94,6 @@ class MultiEndCall { int sample_rate_hz_; size_t total_duration_samples_; std::vector<SpeakingTurn> speaking_turns_; - - RTC_DISALLOW_COPY_AND_ASSIGN(MultiEndCall); }; } // namespace conversational_speech diff --git a/modules/audio_processing/test/conversational_speech/simulator.cc b/modules/audio_processing/test/conversational_speech/simulator.cc index 0591252b36..c0fb589852 100644 --- a/modules/audio_processing/test/conversational_speech/simulator.cc +++ b/modules/audio_processing/test/conversational_speech/simulator.cc @@ -22,7 +22,6 @@ #include "common_audio/include/audio_util.h" #include "common_audio/wav_file.h" #include "modules/audio_processing/test/conversational_speech/wavreader_interface.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" #include "test/testsupport/file_utils.h" @@ -125,8 +124,8 @@ std::unique_ptr<std::map<std::string, std::unique_ptr<WavReaderInterface>>> PreloadAudioTracks( return audiotracks_map; } -// Writes all the values in |source_samples| via |wav_writer|. If the number of -// previously written samples in |wav_writer| is less than |interval_begin|, it +// Writes all the values in `source_samples` via `wav_writer`. If the number of +// previously written samples in `wav_writer` is less than `interval_begin`, it // adds zeros as left padding. The padding corresponds to intervals during which // a speaker is not active. void PadLeftWriteChunk(rtc::ArrayView<const int16_t> source_samples, @@ -145,9 +144,9 @@ void PadLeftWriteChunk(rtc::ArrayView<const int16_t> source_samples, wav_writer->WriteSamples(source_samples.data(), source_samples.size()); } -// Appends zeros via |wav_writer|. The number of zeros is always non-negative +// Appends zeros via `wav_writer`.
The number of zeros is always non-negative // and equal to the difference between the previously written samples and -// |pad_samples|. +// `pad_samples`. void PadRightWrite(WavWriter* wav_writer, size_t pad_samples) { RTC_CHECK(wav_writer); RTC_CHECK_GE(pad_samples, wav_writer->num_samples()); diff --git a/modules/audio_processing/test/conversational_speech/simulator.h b/modules/audio_processing/test/conversational_speech/simulator.h index a9992eb5d8..2584782c22 100644 --- a/modules/audio_processing/test/conversational_speech/simulator.h +++ b/modules/audio_processing/test/conversational_speech/simulator.h @@ -17,7 +17,6 @@ #include #include "modules/audio_processing/test/conversational_speech/multiend_call.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { diff --git a/modules/audio_processing/test/debug_dump_replayer.cc b/modules/audio_processing/test/debug_dump_replayer.cc index 754b42d281..cab6966765 100644 --- a/modules/audio_processing/test/debug_dump_replayer.cc +++ b/modules/audio_processing/test/debug_dump_replayer.cc @@ -178,14 +178,13 @@ void DebugDumpReplayer::OnRuntimeSettingEvent( void DebugDumpReplayer::MaybeRecreateApm(const audioproc::Config& msg) { // These configurations cannot be changed on the fly. - Config config; RTC_CHECK(msg.has_aec_delay_agnostic_enabled()); RTC_CHECK(msg.has_aec_extended_filter_enabled()); // We only create APM once, since changes on these fields should not // happen in current implementation. if (!apm_.get()) { - apm_.reset(AudioProcessingBuilderForTesting().Create(config)); + apm_ = AudioProcessingBuilderForTesting().Create(); } } diff --git a/modules/audio_processing/test/debug_dump_replayer.h b/modules/audio_processing/test/debug_dump_replayer.h index 4139149024..e514d1d94f 100644 --- a/modules/audio_processing/test/debug_dump_replayer.h +++ b/modules/audio_processing/test/debug_dump_replayer.h @@ -60,7 +60,7 @@ class DebugDumpReplayer { std::unique_ptr<ChannelBuffer<float>> reverse_; std::unique_ptr<ChannelBuffer<float>> output_; - std::unique_ptr<AudioProcessing> apm_; + rtc::scoped_refptr<AudioProcessing> apm_; FILE* debug_file_; diff --git a/modules/audio_processing/test/debug_dump_test.cc b/modules/audio_processing/test/debug_dump_test.cc index 2381d1e8b4..b7351609ec 100644 --- a/modules/audio_processing/test/debug_dump_test.cc +++ b/modules/audio_processing/test/debug_dump_test.cc @@ -47,13 +47,11 @@ class DebugDumpGenerator { const std::string& reverse_file_name, int reverse_rate_hz, int reverse_channels, - const Config& config, const std::string& dump_file_name, bool enable_pre_amplifier); // Constructor that uses default input files.
- explicit DebugDumpGenerator(const Config& config, - const AudioProcessing::Config& apm_config); + explicit DebugDumpGenerator(const AudioProcessing::Config& apm_config); ~DebugDumpGenerator(); @@ -112,7 +110,7 @@ class DebugDumpGenerator { bool enable_pre_amplifier_; TaskQueueForTest worker_queue_; - std::unique_ptr<AudioProcessing> apm_; + rtc::scoped_refptr<AudioProcessing> apm_; const std::string dump_file_name_; }; @@ -123,7 +121,6 @@ DebugDumpGenerator::DebugDumpGenerator(const std::string& input_file_name, const std::string& reverse_file_name, int reverse_rate_hz, int reverse_channels, - const Config& config, const std::string& dump_file_name, bool enable_pre_amplifier) : input_config_(input_rate_hz, input_channels), @@ -143,11 +140,10 @@ DebugDumpGenerator::DebugDumpGenerator(const std::string& input_file_name, worker_queue_("debug_dump_generator_worker_queue"), dump_file_name_(dump_file_name) { AudioProcessingBuilderForTesting apm_builder; - apm_.reset(apm_builder.Create(config)); + apm_ = apm_builder.Create(); } DebugDumpGenerator::DebugDumpGenerator( - const Config& config, const AudioProcessing::Config& apm_config) : DebugDumpGenerator(ResourcePath("near32_stereo", "pcm"), 32000, @@ -155,7 +151,6 @@ DebugDumpGenerator::DebugDumpGenerator( ResourcePath("far32_stereo", "pcm"), 32000, 2, - config, TempFilename(OutputPath(), "debug_aec"), apm_config.pre_amplifier.enabled) { apm_->ApplyConfig(apm_config); @@ -290,8 +285,7 @@ void DebugDumpTest::VerifyDebugDump(const std::string& in_filename) { } TEST_F(DebugDumpTest, SimpleCase) { - Config config; - DebugDumpGenerator generator(config, AudioProcessing::Config()); + DebugDumpGenerator generator(/*apm_config=*/{}); generator.StartRecording(); generator.Process(100); generator.StopRecording(); @@ -299,8 +293,7 @@ TEST_F(DebugDumpTest, SimpleCase) { } TEST_F(DebugDumpTest, ChangeInputFormat) { - Config config; - DebugDumpGenerator generator(config, AudioProcessing::Config()); + DebugDumpGenerator generator(/*apm_config=*/{}); generator.StartRecording(); generator.Process(100); @@ -317,8 +310,7 @@ TEST_F(DebugDumpTest, ChangeInputFormat) { } TEST_F(DebugDumpTest, ChangeReverseFormat) { - Config config; - DebugDumpGenerator generator(config, AudioProcessing::Config()); + DebugDumpGenerator generator(/*apm_config=*/{}); generator.StartRecording(); generator.Process(100); generator.SetReverseRate(48000); @@ -329,8 +321,7 @@ TEST_F(DebugDumpTest, ChangeReverseFormat) { } TEST_F(DebugDumpTest, ChangeOutputFormat) { - Config config; - DebugDumpGenerator generator(config, AudioProcessing::Config()); + DebugDumpGenerator generator(/*apm_config=*/{}); generator.StartRecording(); generator.Process(100); generator.SetOutputRate(48000); @@ -341,10 +332,9 @@ TEST_F(DebugDumpTest, ChangeOutputFormat) { } TEST_F(DebugDumpTest, ToggleAec) { - Config config; AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = true; - DebugDumpGenerator generator(config, apm_config); + DebugDumpGenerator generator(apm_config); generator.StartRecording(); generator.Process(100); @@ -357,14 +347,13 @@ TEST_F(DebugDumpTest, ToggleAec) { } TEST_F(DebugDumpTest, VerifyCombinedExperimentalStringInclusive) { - Config config; AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = true; apm_config.gain_controller1.analog_gain_controller.enabled = true; apm_config.gain_controller1.analog_gain_controller.startup_min_volume = 0; // Arbitrarily set clipping gain to 17, which will never be the default.
apm_config.gain_controller1.analog_gain_controller.clipped_level_min = 17; - DebugDumpGenerator generator(config, apm_config); + DebugDumpGenerator generator(apm_config); generator.StartRecording(); generator.Process(100); generator.StopRecording(); @@ -388,10 +377,9 @@ TEST_F(DebugDumpTest, VerifyCombinedExperimentalStringInclusive) { } TEST_F(DebugDumpTest, VerifyCombinedExperimentalStringExclusive) { - Config config; AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = true; - DebugDumpGenerator generator(config, apm_config); + DebugDumpGenerator generator(apm_config); generator.StartRecording(); generator.Process(100); generator.StopRecording(); @@ -414,10 +402,9 @@ TEST_F(DebugDumpTest, VerifyCombinedExperimentalStringExclusive) { } TEST_F(DebugDumpTest, VerifyAec3ExperimentalString) { - Config config; AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = true; - DebugDumpGenerator generator(config, apm_config); + DebugDumpGenerator generator(apm_config); generator.StartRecording(); generator.Process(100); generator.StopRecording(); @@ -439,13 +426,12 @@ TEST_F(DebugDumpTest, VerifyAec3ExperimentalString) { } TEST_F(DebugDumpTest, VerifyAgcClippingLevelExperimentalString) { - Config config; AudioProcessing::Config apm_config; apm_config.gain_controller1.analog_gain_controller.enabled = true; apm_config.gain_controller1.analog_gain_controller.startup_min_volume = 0; // Arbitrarily set clipping gain to 17, which will never be the default. apm_config.gain_controller1.analog_gain_controller.clipped_level_min = 17; - DebugDumpGenerator generator(config, apm_config); + DebugDumpGenerator generator(apm_config); generator.StartRecording(); generator.Process(100); generator.StopRecording(); @@ -467,8 +453,7 @@ TEST_F(DebugDumpTest, VerifyAgcClippingLevelExperimentalString) { } TEST_F(DebugDumpTest, VerifyEmptyExperimentalString) { - Config config; - DebugDumpGenerator generator(config, AudioProcessing::Config()); + DebugDumpGenerator generator(/*apm_config=*/{}); generator.StartRecording(); generator.Process(100); generator.StopRecording(); @@ -495,8 +480,7 @@ TEST_F(DebugDumpTest, VerifyEmptyExperimentalString) { #define MAYBE_ToggleAgc ToggleAgc #endif TEST_F(DebugDumpTest, MAYBE_ToggleAgc) { - Config config; - DebugDumpGenerator generator(config, AudioProcessing::Config()); + DebugDumpGenerator generator(/*apm_config=*/{}); generator.StartRecording(); generator.Process(100); @@ -510,8 +494,7 @@ TEST_F(DebugDumpTest, MAYBE_ToggleAgc) { } TEST_F(DebugDumpTest, ToggleNs) { - Config config; - DebugDumpGenerator generator(config, AudioProcessing::Config()); + DebugDumpGenerator generator(/*apm_config=*/{}); generator.StartRecording(); generator.Process(100); @@ -525,8 +508,7 @@ TEST_F(DebugDumpTest, ToggleNs) { } TEST_F(DebugDumpTest, TransientSuppressionOn) { - Config config; - DebugDumpGenerator generator(config, AudioProcessing::Config()); + DebugDumpGenerator generator(/*apm_config=*/{}); AudioProcessing::Config apm_config = generator.apm()->GetConfig(); apm_config.transient_suppression.enabled = true; @@ -539,10 +521,9 @@ TEST_F(DebugDumpTest, TransientSuppressionOn) { } TEST_F(DebugDumpTest, PreAmplifierIsOn) { - Config config; AudioProcessing::Config apm_config; apm_config.pre_amplifier.enabled = true; - DebugDumpGenerator generator(config, apm_config); + DebugDumpGenerator generator(apm_config); generator.StartRecording(); generator.Process(100); generator.StopRecording(); diff --git a/modules/audio_processing/test/fake_recording_device.cc 
b/modules/audio_processing/test/fake_recording_device.cc index 5202014b52..3a35ee9d74 100644 --- a/modules/audio_processing/test/fake_recording_device.cc +++ b/modules/audio_processing/test/fake_recording_device.cc @@ -148,7 +148,7 @@ FakeRecordingDevice::FakeRecordingDevice(int initial_mic_level, worker_ = std::make_unique(initial_mic_level); break; default: - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); break; } } diff --git a/modules/audio_processing/test/fake_recording_device.h b/modules/audio_processing/test/fake_recording_device.h index b4d2a103be..da3c0cf794 100644 --- a/modules/audio_processing/test/fake_recording_device.h +++ b/modules/audio_processing/test/fake_recording_device.h @@ -48,18 +48,18 @@ class FakeRecordingDevice final { ~FakeRecordingDevice(); int MicLevel() const; - void SetMicLevel(const int level); - void SetUndoMicLevel(const int level); + void SetMicLevel(int level); + void SetUndoMicLevel(int level); // Simulates the analog gain. - // If |real_device_level| is a valid level, the unmodified mic signal is - // virtually restored. To skip the latter step set |real_device_level| to + // If `real_device_level` is a valid level, the unmodified mic signal is + // virtually restored. To skip the latter step set `real_device_level` to // an empty value. void SimulateAnalogGain(rtc::ArrayView<int16_t> buffer); // Simulates the analog gain. - // If |real_device_level| is a valid level, the unmodified mic signal is - // virtually restored. To skip the latter step set |real_device_level| to + // If `real_device_level` is a valid level, the unmodified mic signal is - // virtually restored. To skip the latter step set `real_device_level` to // an empty value. void SimulateAnalogGain(ChannelBuffer<float>* buffer); diff --git a/modules/audio_processing/test/fake_recording_device_unittest.cc b/modules/audio_processing/test/fake_recording_device_unittest.cc index 74bb47f710..2ac8b1dc48 100644 --- a/modules/audio_processing/test/fake_recording_device_unittest.cc +++ b/modules/audio_processing/test/fake_recording_device_unittest.cc @@ -75,7 +75,7 @@ void CheckIfMonotoneSamplesModules(const ChannelBuffer<float>* prev, } // Checks that the samples in each pair have the same sign unless the sample in -// |dst| is zero (because of zero gain). +// `dst` is zero (because of zero gain). void CheckSameSign(const ChannelBuffer<float>* src, const ChannelBuffer<float>* dst) { RTC_DCHECK_EQ(src->num_channels(), dst->num_channels()); diff --git a/modules/audio_processing/test/performance_timer.h b/modules/audio_processing/test/performance_timer.h index b6e0da70d8..5375ba74e8 100644 --- a/modules/audio_processing/test/performance_timer.h +++ b/modules/audio_processing/test/performance_timer.h @@ -31,7 +31,7 @@ class PerformanceTimer { double GetDurationStandardDeviation() const; // These methods are the same as those above, but they ignore the first - // |number_of_warmup_samples| measurements. + // `number_of_warmup_samples` measurements.
double GetDurationAverage(size_t number_of_warmup_samples) const; double GetDurationStandardDeviation(size_t number_of_warmup_samples) const; diff --git a/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py b/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py index 60d1e85188..c425885b95 100644 --- a/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py +++ b/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_boxplot.py @@ -88,7 +88,7 @@ def FilterScoresByParams(data_frame, filter_params, score_name, config_dir): data_cell_scores = data_with_config[data_with_config.eval_score_name == score_name] - # Exactly one of |params_to_plot| must match: + # Exactly one of `params_to_plot` must match: (matching_param, ) = [ x for x in filter_params if '-' + x in config_json ] diff --git a/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py b/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py index b0be37ce77..ecae2ed995 100644 --- a/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py +++ b/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py @@ -133,7 +133,7 @@ def _FindOptimalParameter(configs_and_scores, score_weighting): {score1: value1, ...}}] into a numeric value Returns: - the config that has the largest values of |score_weighting| applied + the config that has the largest values of `score_weighting` applied to its scores. """ diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py index 23f6effa9c..59c5f74be4 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py @@ -397,7 +397,7 @@ def _Run(self, output_path): # TODO(alessiob): Fix or remove if not needed. # thd = np.sqrt(np.sum(b_terms[1:]**2)) / b_terms[0] - # TODO(alessiob): Check the range of |thd_plus_noise| and update the class + # TODO(alessiob): Check the range of `thd_plus_noise` and update the class # docstring above if accordingly. 
thd_plus_noise = distortion_and_noise / b_terms[0] diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py index fb3aae0022..fe3a6c7cb9 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py @@ -349,7 +349,7 @@ def _BuildScoreStatsInspectorTableCell(self, score_tuple, anchor_data): def _SliceDataForScoreTableCell(self, score_name, apm_config, test_data_gen, test_data_gen_params): - """Slices |self._scores_data_frame| to extract the data for a tab.""" + """Slices `self._scores_data_frame` to extract the data for a tab.""" masks = [] masks.append(self._scores_data_frame.eval_score_name == score_name) masks.append(self._scores_data_frame.apm_config == apm_config) @@ -363,7 +363,7 @@ def _SliceDataForScoreTableCell(self, score_name, apm_config, @classmethod def _SliceDataForScoreStatsTableCell(cls, scores, capture, render, echo_simulator): - """Slices |scores| to extract the data for a tab.""" + """Slices `scores` to extract the data for a tab.""" masks = [] masks.append(scores.capture == capture) @@ -378,7 +378,7 @@ def _SliceDataForScoreStatsTableCell(cls, scores, capture, render, @classmethod def _FindUniqueTuples(cls, data_frame, fields): - """Slices |data_frame| to a list of fields and finds unique tuples.""" + """Slices `data_frame` to a list of fields and finds unique tuples.""" return data_frame[fields].drop_duplicates().values.tolist() @classmethod diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py index f9125fa7f3..af022bd461 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py @@ -47,7 +47,7 @@ def Mix(cls, output_path, capture_input_filepath, echo_filepath): Hard-clipping may occur in the mix; a warning is raised when this happens. - If |echo_filepath| is None, nothing is done and |capture_input_filepath| is + If `echo_filepath` is None, nothing is done and `capture_input_filepath` is returned. Args: diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py index e41637cd8d..95e801903d 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py @@ -174,7 +174,7 @@ def DetectHardClipping(cls, signal, threshold=2): """Detects hard clipping. Hard clipping is simply detected by counting samples that touch either the - lower or upper bound too many times in a row (according to |threshold|). + lower or upper bound too many times in a row (according to `threshold`). The presence of a single sequence of samples meeting such property is enough to label the signal as hard clipped. @@ -295,16 +295,16 @@ def MixSignals(cls, signal, noise, target_snr=0.0, pad_noise=MixPadding.NO_PADDING): - """Mixes |signal| and |noise| with a target SNR. + """Mixes `signal` and `noise` with a target SNR. - Mix |signal| and |noise| with a desired SNR by scaling |noise|.
+ Mix `signal` and `noise` with a desired SNR by scaling `noise`. If the target SNR is +/- infinite, a copy of signal/noise is returned. - If |signal| is shorter than |noise|, the length of the mix equals that of - |signal|. Otherwise, the mix length depends on whether padding is applied. - When padding is not applied, that is |pad_noise| is set to NO_PADDING - (default), the mix length equals that of |noise| - i.e., |signal| is - truncated. Otherwise, |noise| is extended and the resulting mix has the same - length of |signal|. + If `signal` is shorter than `noise`, the length of the mix equals that of + `signal`. Otherwise, the mix length depends on whether padding is applied. + When padding is not applied, that is, `pad_noise` is set to NO_PADDING + (default), the mix length equals that of `noise` - i.e., `signal` is + truncated. Otherwise, `noise` is extended and the resulting mix has the same + length as `signal`. Args: signal: AudioSegment instance (signal). @@ -342,18 +342,18 @@ def MixSignals(cls, signal_duration = len(signal) noise_duration = len(noise) if signal_duration <= noise_duration: - # Ignore |pad_noise|, |noise| is truncated if longer that |signal|, the - # mix will have the same length of |signal|. + # Ignore `pad_noise`; `noise` is truncated if longer than `signal`, and the + # mix will have the same length as `signal`. return signal.overlay(noise.apply_gain(gain_db)) elif pad_noise == cls.MixPadding.NO_PADDING: - # |signal| is longer than |noise|, but no padding is applied to |noise|. - # Truncate |signal|. + # `signal` is longer than `noise`, but no padding is applied to `noise`. + # Truncate `signal`. return noise.overlay(signal, gain_during_overlay=gain_db) elif pad_noise == cls.MixPadding.ZERO_PADDING: # TODO(alessiob): Check that this works as expected. return signal.overlay(noise.apply_gain(gain_db)) elif pad_noise == cls.MixPadding.LOOP: - # |signal| is longer than |noise|, extend |noise| by looping. + # `signal` is longer than `noise`; extend `noise` by looping. return signal.overlay(noise.apply_gain(gain_db), loop=True) else: raise exceptions.SignalProcessingException('invalid padding type') diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py index fe30c9c44c..69b3a1624e 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py @@ -264,7 +264,7 @@ def _CreateInputSignal(input_signal_filepath): The file name is parsed to extract input signal creator and params. If a creator is matched and the parameters are valid, a new signal is generated - and written in |input_signal_filepath|. + and written to `input_signal_filepath`. Args: input_signal_filepath: Path to the input signal audio file to write.
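The MixSignals() docstring above centers on one computation: the gain to apply to `noise` so that the mix reaches the target SNR. As a minimal illustrative sketch of that step (written in C++ here; the helper names are assumptions, not code from this patch, and non-empty, non-silent inputs are assumed):

#include <cmath>
#include <vector>

// Mean power of a signal; returns 0.0 for an empty input.
double MeanPower(const std::vector<float>& x) {
  double sum = 0.0;
  for (float v : x)
    sum += static_cast<double>(v) * v;
  return x.empty() ? 0.0 : sum / x.size();
}

// Gain, in dB, to apply to `noise` so that the signal-to-scaled-noise ratio
// equals `target_snr_db`. Solving SNR_db = 10*log10(P_s / P_n) - gain_db:
double NoiseGainDb(const std::vector<float>& signal,
                   const std::vector<float>& noise,
                   double target_snr_db) {
  return 10.0 * std::log10(MeanPower(signal) / MeanPower(noise)) -
         target_snr_db;
}

The infinite-SNR cases in the docstring fall out of the same formula: a target of plus infinity drives the noise gain to minus infinity (noise effectively muted) and vice versa, which is why the Python code short-circuits them by returning a copy.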
diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py index 6d0cb79f5b..f75098ae2c 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py @@ -116,7 +116,7 @@ def GetNoiseReferenceFilePaths(identity_generator): key = noisy_signal_filepaths.keys()[0] return noisy_signal_filepaths[key], reference_signal_filepaths[key] - # Test the |copy_with_identity| flag. + # Test the `copy_with_identity` flag. for copy_with_identity in [False, True]: # Instance the generator through the factory. factory = test_data_generation_factory.TestDataGeneratorFactory( @@ -126,7 +126,7 @@ def GetNoiseReferenceFilePaths(identity_generator): factory.SetOutputDirectoryPrefix('datagen-') generator = factory.GetInstance( test_data_generation.IdentityTestDataGenerator) - # Check |copy_with_identity| is set correctly. + # Check `copy_with_identity` is set correctly. self.assertEqual(copy_with_identity, generator.copy_with_identity) # Generate test data and extract the paths to the noise and the reference @@ -137,7 +137,7 @@ def GetNoiseReferenceFilePaths(identity_generator): noisy_signal_filepath, reference_signal_filepath = ( GetNoiseReferenceFilePaths(generator)) - # Check that a copy is made if and only if |copy_with_identity| is True. + # Check that a copy is made if and only if `copy_with_identity` is True. if copy_with_identity: self.assertNotEqual(noisy_signal_filepath, input_signal_filepath) diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc b/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc index 9906eca7f4..b47f6221cb 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/vad.cc @@ -63,7 +63,7 @@ int main(int argc, char* argv[]) { std::unique_ptr vad = CreateVad(Vad::Aggressiveness::kVadNormal); std::array samples; char buff = 0; // Buffer to write one bit per frame. - uint8_t next = 0; // Points to the next bit to write in |buff|. + uint8_t next = 0; // Points to the next bit to write in `buff`. while (true) { // Process frame. 
const auto read_samples = diff --git a/modules/audio_processing/test/simulator_buffers.cc b/modules/audio_processing/test/simulator_buffers.cc index e6bd6c1c19..458f6ced76 100644 --- a/modules/audio_processing/test/simulator_buffers.cc +++ b/modules/audio_processing/test/simulator_buffers.cc @@ -58,7 +58,7 @@ void SimulatorBuffers::CreateConfigAndBuffer( std::vector* buffer_data, std::vector* buffer_data_samples) { int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100); - *config = StreamConfig(sample_rate_hz, num_channels, false); + *config = StreamConfig(sample_rate_hz, num_channels); buffer->reset( new AudioBuffer(config->sample_rate_hz(), config->num_channels(), config->sample_rate_hz(), config->num_channels(), diff --git a/modules/audio_processing/test/test_utils.cc b/modules/audio_processing/test/test_utils.cc index 839358d497..dcd5869e0b 100644 --- a/modules/audio_processing/test/test_utils.cc +++ b/modules/audio_processing/test/test_utils.cc @@ -139,15 +139,4 @@ void SetFrameSampleRate(Int16FrameData* frame, int sample_rate_hz) { AudioProcessing::kChunkSizeMs * sample_rate_hz / 1000; } -AudioProcessing::ChannelLayout LayoutFromChannels(size_t num_channels) { - switch (num_channels) { - case 1: - return AudioProcessing::kMono; - case 2: - return AudioProcessing::kStereo; - default: - RTC_CHECK_NOTREACHED(); - } -} - } // namespace webrtc diff --git a/modules/audio_processing/test/test_utils.h b/modules/audio_processing/test/test_utils.h index e2d243eec4..de0fc11893 100644 --- a/modules/audio_processing/test/test_utils.h +++ b/modules/audio_processing/test/test_utils.h @@ -23,7 +23,6 @@ #include "common_audio/channel_buffer.h" #include "common_audio/wav_file.h" #include "modules/audio_processing/include/audio_processing.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -35,13 +34,14 @@ class RawFile final { explicit RawFile(const std::string& filename); ~RawFile(); + RawFile(const RawFile&) = delete; + RawFile& operator=(const RawFile&) = delete; + void WriteSamples(const int16_t* samples, size_t num_samples); void WriteSamples(const float* samples, size_t num_samples); private: FILE* file_handle_; - - RTC_DISALLOW_COPY_AND_ASSIGN(RawFile); }; // Encapsulates samples and metadata for an integer frame. @@ -78,15 +78,16 @@ class ChannelBufferWavReader final { explicit ChannelBufferWavReader(std::unique_ptr file); ~ChannelBufferWavReader(); - // Reads data from the file according to the |buffer| format. Returns false if + ChannelBufferWavReader(const ChannelBufferWavReader&) = delete; + ChannelBufferWavReader& operator=(const ChannelBufferWavReader&) = delete; + + // Reads data from the file according to the `buffer` format. Returns false if // a full buffer can't be read from the file. bool Read(ChannelBuffer* buffer); private: std::unique_ptr file_; std::vector interleaved_; - - RTC_DISALLOW_COPY_AND_ASSIGN(ChannelBufferWavReader); }; // Writes ChannelBuffers to a provided WavWriter. @@ -95,13 +96,14 @@ class ChannelBufferWavWriter final { explicit ChannelBufferWavWriter(std::unique_ptr file); ~ChannelBufferWavWriter(); + ChannelBufferWavWriter(const ChannelBufferWavWriter&) = delete; + ChannelBufferWavWriter& operator=(const ChannelBufferWavWriter&) = delete; + void Write(const ChannelBuffer& buffer); private: std::unique_ptr file_; std::vector interleaved_; - - RTC_DISALLOW_COPY_AND_ASSIGN(ChannelBufferWavWriter); }; // Takes a pointer to a vector. 
Allows appending the samples of channel buffers @@ -115,7 +117,7 @@ class ChannelBufferVectorWriter final { delete; ~ChannelBufferVectorWriter(); - // Creates an interleaved copy of |buffer|, converts the samples to float S16 + // Creates an interleaved copy of `buffer`, converts the samples to float S16 // and appends the result to output_. void Write(const ChannelBuffer& buffer); @@ -152,8 +154,6 @@ void SetContainerFormat(int sample_rate_hz, cb->reset(new ChannelBuffer(frame->samples_per_channel, num_channels)); } -AudioProcessing::ChannelLayout LayoutFromChannels(size_t num_channels); - template float ComputeSNR(const T* ref, const T* test, size_t length, float* variance) { float mse = 0; diff --git a/modules/audio_processing/test/wav_based_simulator.cc b/modules/audio_processing/test/wav_based_simulator.cc index e6a6fe92eb..10c0d3717e 100644 --- a/modules/audio_processing/test/wav_based_simulator.cc +++ b/modules/audio_processing/test/wav_based_simulator.cc @@ -44,10 +44,7 @@ WavBasedSimulator::GetCustomEventChain(const std::string& filename) { case '\n': break; default: - RTC_FATAL() - << "Incorrect custom call order file, reverting to using the " - << "default call order"; - return WavBasedSimulator::GetDefaultEventChain(); + RTC_FATAL() << "Incorrect custom call order file"; } num_read = file_wrapper.Read(&c, sizeof(char)); diff --git a/modules/audio_processing/three_band_filter_bank.cc b/modules/audio_processing/three_band_filter_bank.cc index 2a7d272e60..bd1c50477a 100644 --- a/modules/audio_processing/three_band_filter_bank.cc +++ b/modules/audio_processing/three_band_filter_bank.cc @@ -39,16 +39,16 @@ namespace webrtc { namespace { -// Factors to take into account when choosing |kFilterSize|: -// 1. Higher |kFilterSize|, means faster transition, which ensures less +// Factors to take into account when choosing `kFilterSize`: +// 1. Higher `kFilterSize`, means faster transition, which ensures less // aliasing. This is especially important when there is non-linear // processing between the splitting and merging. // 2. The delay that this filter bank introduces is -// |kNumBands| * |kSparsity| * |kFilterSize| / 2, so it increases linearly -// with |kFilterSize|. -// 3. The computation complexity also increases linearly with |kFilterSize|. +// `kNumBands` * `kSparsity` * `kFilterSize` / 2, so it increases linearly +// with `kFilterSize`. +// 3. The computation complexity also increases linearly with `kFilterSize`. -// The Matlab code to generate these |kFilterCoeffs| is: +// The Matlab code to generate these `kFilterCoeffs` is: // // N = kNumBands * kSparsity * kFilterSize - 1; // h = fir1(N, 1 / (2 * kNumBands), kaiser(N + 1, 3.5)); @@ -59,7 +59,7 @@ namespace { // Because the total bandwidth of the lower and higher band is double the middle // one (because of the spectrum parity), the low-pass prototype is half the -// bandwidth of 1 / (2 * |kNumBands|) and is then shifted with cosine modulation +// bandwidth of 1 / (2 * `kNumBands`) and is then shifted with cosine modulation // to the right places. // A Kaiser window is used because of its flexibility and the alpha is set to // 3.5, since that sets a stop band attenuation of 40dB ensuring a fast @@ -100,8 +100,8 @@ const float kDctModulation[ThreeBandFilterBank::kNumNonZeroFilters][kDctSize] = {1.f, -2.f, 1.f}, {1.73205077f, 0.f, -1.73205077f}}; -// Filters the input signal |in| with the filter |filter| using a shift by -// |in_shift|, taking into account the previous state. 
+// Filters the input signal `in` with the filter `filter` using a shift by +// `in_shift`, taking into account the previous state. void FilterCore( rtc::ArrayView filter, rtc::ArrayView in, @@ -164,10 +164,10 @@ ThreeBandFilterBank::ThreeBandFilterBank() { ThreeBandFilterBank::~ThreeBandFilterBank() = default; // The analysis can be separated in these steps: -// 1. Serial to parallel downsampling by a factor of |kNumBands|. -// 2. Filtering of |kSparsity| different delayed signals with polyphase +// 1. Serial to parallel downsampling by a factor of `kNumBands`. +// 2. Filtering of `kSparsity` different delayed signals with polyphase // decomposition of the low-pass prototype filter and upsampled by a factor -// of |kSparsity|. +// of `kSparsity`. // 3. Modulating with cosines and accumulating to get the desired band. void ThreeBandFilterBank::Analysis( rtc::ArrayView in, @@ -211,8 +211,9 @@ void ThreeBandFilterBank::Analysis( // Band and modulate the output. for (int band = 0; band < ThreeBandFilterBank::kNumBands; ++band) { + float* out_band = out[band].data(); for (int n = 0; n < kSplitBandSize; ++n) { - out[band][n] += dct_modulation[band] * out_subsampled[n]; + out_band[n] += dct_modulation[band] * out_subsampled[n]; } } } @@ -222,9 +223,9 @@ void ThreeBandFilterBank::Analysis( // The synthesis can be separated in these steps: // 1. Modulating with cosines. // 2. Filtering each one with a polyphase decomposition of the low-pass -// prototype filter upsampled by a factor of |kSparsity| and accumulating -// |kSparsity| signals with different delays. -// 3. Parallel to serial upsampling by a factor of |kNumBands|. +// prototype filter upsampled by a factor of `kSparsity` and accumulating +// `kSparsity` signals with different delays. +// 3. Parallel to serial upsampling by a factor of `kNumBands`. void ThreeBandFilterBank::Synthesis( rtc::ArrayView, ThreeBandFilterBank::kNumBands> in, @@ -254,8 +255,9 @@ void ThreeBandFilterBank::Synthesis( std::fill(in_subsampled.begin(), in_subsampled.end(), 0.f); for (int band = 0; band < ThreeBandFilterBank::kNumBands; ++band) { RTC_DCHECK_EQ(in[band].size(), kSplitBandSize); + const float* in_band = in[band].data(); for (int n = 0; n < kSplitBandSize; ++n) { - in_subsampled[n] += dct_modulation[band] * in[band][n]; + in_subsampled[n] += dct_modulation[band] * in_band[n]; } } diff --git a/modules/audio_processing/three_band_filter_bank.h b/modules/audio_processing/three_band_filter_bank.h index e6346dec44..db66caba4a 100644 --- a/modules/audio_processing/three_band_filter_bank.h +++ b/modules/audio_processing/three_band_filter_bank.h @@ -55,13 +55,13 @@ class ThreeBandFilterBank final { ThreeBandFilterBank(); ~ThreeBandFilterBank(); - // Splits |in| of size kFullBandSize into 3 downsampled frequency bands in - // |out|, each of size 160. + // Splits `in` of size kFullBandSize into 3 downsampled frequency bands in + // `out`, each of size 160. void Analysis(rtc::ArrayView in, rtc::ArrayView, kNumBands> out); - // Merges the 3 downsampled frequency bands in |in|, each of size 160, into - // |out|, which is of size kFullBandSize. + // Merges the 3 downsampled frequency bands in `in`, each of size 160, into + // `out`, which is of size kFullBandSize. 
void Synthesis(rtc::ArrayView, kNumBands> in, rtc::ArrayView out); diff --git a/modules/audio_processing/transient/click_annotate.cc b/modules/audio_processing/transient/click_annotate.cc index 21641f85cc..f3f040f9aa 100644 --- a/modules/audio_processing/transient/click_annotate.cc +++ b/modules/audio_processing/transient/click_annotate.cc @@ -26,7 +26,7 @@ using webrtc::TransientDetector; // Creates a send times array, one for each step. // Each block that contains a transient, has an infinite send time. // The resultant array is written to a DAT file -// Returns -1 on error or |lost_packets| otherwise. +// Returns -1 on error or `lost_packets` otherwise. int main(int argc, char* argv[]) { if (argc != 5) { printf("\n%s - Application to generate a RTP timing file.\n\n", argv[0]); diff --git a/modules/audio_processing/transient/dyadic_decimator.h b/modules/audio_processing/transient/dyadic_decimator.h index fcb56b7dc9..52467e8c25 100644 --- a/modules/audio_processing/transient/dyadic_decimator.h +++ b/modules/audio_processing/transient/dyadic_decimator.h @@ -18,7 +18,7 @@ namespace webrtc { // Returns the proper length of the output buffer that you should use for the -// given |in_length| and decimation |odd_sequence|. +// given `in_length` and decimation `odd_sequence`. // Return -1 on error. inline size_t GetOutLengthToDyadicDecimate(size_t in_length, bool odd_sequence) { @@ -34,10 +34,10 @@ inline size_t GetOutLengthToDyadicDecimate(size_t in_length, // Performs a dyadic decimation: removes every odd/even member of a sequence // halving its overall length. // Arguments: -// in: array of |in_length|. +// in: array of `in_length`. // odd_sequence: If false, the odd members will be removed (1, 3, 5, ...); // if true, the even members will be removed (0, 2, 4, ...). -// out: array of |out_length|. |out_length| must be large enough to +// out: array of `out_length`. `out_length` must be large enough to // hold the decimated output. The necessary length can be provided by // GetOutLengthToDyadicDecimate(). // Must be previously allocated. diff --git a/modules/audio_processing/transient/dyadic_decimator_unittest.cc b/modules/audio_processing/transient/dyadic_decimator_unittest.cc index 3e65a7b9bf..e4776d694f 100644 --- a/modules/audio_processing/transient/dyadic_decimator_unittest.cc +++ b/modules/audio_processing/transient/dyadic_decimator_unittest.cc @@ -42,7 +42,7 @@ TEST(DyadicDecimatorTest, DyadicDecimateErrorValues) { static_cast(NULL), kOutBufferLength); EXPECT_EQ(0u, out_samples); - // Less than required |out_length|. + // Less than required `out_length`. out_samples = DyadicDecimate(test_buffer_even_len, kEvenBufferLength, false, // Even sequence. test_buffer_out, 2); diff --git a/modules/audio_processing/transient/file_utils.h b/modules/audio_processing/transient/file_utils.h index 618401761b..b748337773 100644 --- a/modules/audio_processing/transient/file_utils.h +++ b/modules/audio_processing/transient/file_utils.h @@ -50,63 +50,63 @@ int ConvertFloatToByteArray(float value, uint8_t out_bytes[4]); // Returns 0 if correct, -1 on error. int ConvertDoubleToByteArray(double value, uint8_t out_bytes[8]); -// Reads |length| 16-bit integers from |file| to |buffer|. -// |file| must be previously opened. +// Reads `length` 16-bit integers from `file` to `buffer`. +// `file` must be previously opened. // Returns the number of 16-bit integers read or -1 on error. 
size_t ReadInt16BufferFromFile(FileWrapper* file, size_t length, int16_t* buffer); -// Reads |length| 16-bit integers from |file| and stores those values -// (converting them) in |buffer|. -// |file| must be previously opened. +// Reads `length` 16-bit integers from `file` and stores those values +// (converting them) in `buffer`. +// `file` must be previously opened. // Returns the number of 16-bit integers read or -1 on error. size_t ReadInt16FromFileToFloatBuffer(FileWrapper* file, size_t length, float* buffer); -// Reads |length| 16-bit integers from |file| and stores those values -// (converting them) in |buffer|. -// |file| must be previously opened. +// Reads `length` 16-bit integers from `file` and stores those values +// (converting them) in `buffer`. +// `file` must be previously opened. // Returns the number of 16-bit integers read or -1 on error. size_t ReadInt16FromFileToDoubleBuffer(FileWrapper* file, size_t length, double* buffer); -// Reads |length| floats in binary representation (4 bytes) from |file| to -// |buffer|. -// |file| must be previously opened. +// Reads `length` floats in binary representation (4 bytes) from `file` to +// `buffer`. +// `file` must be previously opened. // Returns the number of floats read or -1 on error. size_t ReadFloatBufferFromFile(FileWrapper* file, size_t length, float* buffer); -// Reads |length| doubles in binary representation (8 bytes) from |file| to -// |buffer|. -// |file| must be previously opened. +// Reads `length` doubles in binary representation (8 bytes) from `file` to +// `buffer`. +// `file` must be previously opened. // Returns the number of doubles read or -1 on error. size_t ReadDoubleBufferFromFile(FileWrapper* file, size_t length, double* buffer); -// Writes |length| 16-bit integers from |buffer| in binary representation (2 -// bytes) to |file|. It flushes |file|, so after this call there are no +// Writes `length` 16-bit integers from `buffer` in binary representation (2 +// bytes) to `file`. It flushes `file`, so after this call there are no // writings pending. -// |file| must be previously opened. +// `file` must be previously opened. // Returns the number of 16-bit integers written or -1 on error. size_t WriteInt16BufferToFile(FileWrapper* file, size_t length, const int16_t* buffer); -// Writes |length| floats from |buffer| in binary representation (4 bytes) to -// |file|. It flushes |file|, so after this call there are no writtings pending. -// |file| must be previously opened. +// Writes `length` floats from `buffer` in binary representation (4 bytes) to +// `file`. It flushes `file`, so after this call there are no writings pending. +// `file` must be previously opened. // Returns the number of floats written or -1 on error. size_t WriteFloatBufferToFile(FileWrapper* file, size_t length, const float* buffer); -// Writes |length| doubles from |buffer| in binary representation (8 bytes) to -// |file|. It flushes |file|, so after this call there are no writings pending. -// |file| must be previously opened. +// Writes `length` doubles from `buffer` in binary representation (8 bytes) to +// `file`. It flushes `file`, so after this call there are no writings pending. +// `file` must be previously opened. // Returns the number of doubles written or -1 on error.
size_t WriteDoubleBufferToFile(FileWrapper* file, size_t length, diff --git a/modules/audio_processing/transient/moving_moments.h b/modules/audio_processing/transient/moving_moments.h index 6dc05204da..70451dcb71 100644 --- a/modules/audio_processing/transient/moving_moments.h +++ b/modules/audio_processing/transient/moving_moments.h @@ -26,13 +26,13 @@ namespace webrtc { // the last values of the moments. When needed. class MovingMoments { public: - // Creates a Moving Moments object, that uses the last |length| values + // Creates a Moving Moments object, that uses the last `length` values // (including the new value introduced in every new calculation). explicit MovingMoments(size_t length); ~MovingMoments(); - // Calculates the new values using |in|. Results will be in the out buffers. - // |first| and |second| must be allocated with at least |in_length|. + // Calculates the new values using `in`. Results will be in the out buffers. + // `first` and `second` must be allocated with at least `in_length`. void CalculateMoments(const float* in, size_t in_length, float* first, @@ -40,7 +40,7 @@ class MovingMoments { private: size_t length_; - // A queue holding the |length_| latest input values. + // A queue holding the `length_` latest input values. std::queue queue_; // Sum of the values of the queue. float sum_; diff --git a/modules/audio_processing/transient/transient_detector.cc b/modules/audio_processing/transient/transient_detector.cc index f03a2ea2bf..5c35505368 100644 --- a/modules/audio_processing/transient/transient_detector.cc +++ b/modules/audio_processing/transient/transient_detector.cc @@ -43,8 +43,8 @@ TransientDetector::TransientDetector(int sample_rate_hz) sample_rate_hz == ts::kSampleRate48kHz); int samples_per_transient = sample_rate_hz * kTransientLengthMs / 1000; // Adjustment to avoid data loss while downsampling, making - // |samples_per_chunk_| and |samples_per_transient| always divisible by - // |kLeaves|. + // `samples_per_chunk_` and `samples_per_transient` always divisible by + // `kLeaves`. samples_per_chunk_ -= samples_per_chunk_ % kLeaves; samples_per_transient -= samples_per_transient % kLeaves; @@ -137,7 +137,7 @@ float TransientDetector::Detect(const float* data, // In the current implementation we return the max of the current result and // the previous results, so the high results have a width equals to - // |transient_length|. + // `transient_length`. return *std::max_element(previous_results_.begin(), previous_results_.end()); } diff --git a/modules/audio_processing/transient/transient_detector.h b/modules/audio_processing/transient/transient_detector.h index 5ede2e83d6..a3dbb7ffde 100644 --- a/modules/audio_processing/transient/transient_detector.h +++ b/modules/audio_processing/transient/transient_detector.h @@ -37,8 +37,8 @@ class TransientDetector { ~TransientDetector(); - // Calculates the log-likelihood of the existence of a transient in |data|. - // |data_length| has to be equal to |samples_per_chunk_|. + // Calculates the log-likelihood of the existence of a transient in `data`. + // `data_length` has to be equal to `samples_per_chunk_`. // Returns a value between 0 and 1, as a non linear representation of this // likelihood. // Returns a negative value on error. @@ -71,7 +71,7 @@ class TransientDetector { float last_second_moment_[kLeaves]; // We keep track of the previous results from the previous chunks, so it can - // be used to effectively give results according to the |transient_length|. 
+ // be used to effectively give results according to the `transient_length`. std::deque previous_results_; // Number of chunks that are going to return only zeros at the beginning of diff --git a/modules/audio_processing/transient/transient_suppression_test.cc b/modules/audio_processing/transient/transient_suppression_test.cc index d06fd96bac..21409132d2 100644 --- a/modules/audio_processing/transient/transient_suppression_test.cc +++ b/modules/audio_processing/transient/transient_suppression_test.cc @@ -191,8 +191,7 @@ void void_main() { in_file, audio_buffer_size, absl::GetFlag(FLAGS_num_channels), audio_buffer_i.get(), detection_file, detection_buffer_size, detection_buffer.get(), reference_file, reference_buffer.get())) { - agc.Process(audio_buffer_i.get(), static_cast(audio_buffer_size), - absl::GetFlag(FLAGS_sample_rate_hz)); + agc.Process({audio_buffer_i.get(), audio_buffer_size}); for (size_t i = 0; i < absl::GetFlag(FLAGS_num_channels) * audio_buffer_size; ++i) { diff --git a/modules/audio_processing/transient/transient_suppressor.h b/modules/audio_processing/transient/transient_suppressor.h index bb262b0684..982ddbd0ec 100644 --- a/modules/audio_processing/transient/transient_suppressor.h +++ b/modules/audio_processing/transient/transient_suppressor.h @@ -27,22 +27,22 @@ class TransientSuppressor { int detector_rate_hz, int num_channels) = 0; - // Processes a |data| chunk, and returns it with keystrokes suppressed from + // Processes a `data` chunk, and returns it with keystrokes suppressed from // it. The float format is assumed to be int16 ranged. If there are more than - // one channel, the chunks are concatenated one after the other in |data|. - // |data_length| must be equal to |data_length_|. - // |num_channels| must be equal to |num_channels_|. - // A sub-band, ideally the higher, can be used as |detection_data|. If it is - // NULL, |data| is used for the detection too. The |detection_data| is always + // one channel, the chunks are concatenated one after the other in `data`. + // `data_length` must be equal to `data_length_`. + // `num_channels` must be equal to `num_channels_`. + // A sub-band, ideally the higher, can be used as `detection_data`. If it is + // NULL, `data` is used for the detection too. The `detection_data` is always // assumed mono. // If a reference signal (e.g. keyboard microphone) is available, it can be - // passed in as |reference_data|. It is assumed mono and must have the same - // length as |data|. NULL is accepted if unavailable. + // passed in as `reference_data`. It is assumed mono and must have the same + // length as `data`. NULL is accepted if unavailable. // This suppressor performs better if voice information is available. - // |voice_probability| is the probability of voice being present in this chunk - // of audio. If voice information is not available, |voice_probability| must + // `voice_probability` is the probability of voice being present in this chunk + // of audio. If voice information is not available, `voice_probability` must // always be set to 1. - // |key_pressed| determines if a key was pressed on this audio chunk. + // `key_pressed` determines if a key was pressed on this audio chunk. // Returns 0 on success and -1 otherwise. 
virtual int Suppress(float* data, size_t data_length, diff --git a/modules/audio_processing/transient/transient_suppressor_impl.cc b/modules/audio_processing/transient/transient_suppressor_impl.cc index d515d30131..f8161f6428 100644 --- a/modules/audio_processing/transient/transient_suppressor_impl.cc +++ b/modules/audio_processing/transient/transient_suppressor_impl.cc @@ -102,7 +102,7 @@ int TransientSuppressorImpl::Initialize(int sample_rate_hz, detector_.reset(new TransientDetector(detection_rate_hz)); data_length_ = sample_rate_hz * ts::kChunkSizeMs / 1000; if (data_length_ > analysis_length_) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return -1; } buffer_delay_ = analysis_length_ - data_length_; @@ -194,7 +194,7 @@ int TransientSuppressorImpl::Suppress(float* data, using_reference_ = detector_->using_reference(); - // |detector_smoothed_| follows the |detector_result| when this last one is + // `detector_smoothed_` follows the `detector_result` when this last one is // increasing, but has an exponential decaying tail to be able to suppress // the ringing of keyclicks. float smooth_factor = using_reference_ ? 0.6 : 0.1; @@ -223,7 +223,7 @@ int TransientSuppressorImpl::Suppress(float* data, } // This should only be called when detection is enabled. UpdateBuffers() must -// have been called. At return, |out_buffer_| will be filled with the +// have been called. At return, `out_buffer_` will be filled with the // processed output. void TransientSuppressorImpl::Suppress(float* in_ptr, float* spectral_mean, @@ -325,7 +325,7 @@ void TransientSuppressorImpl::UpdateRestoration(float voice_probability) { } // Shift buffers to make way for new data. Must be called after -// |detection_enabled_| is updated by UpdateKeypress(). +// `detection_enabled_` is updated by UpdateKeypress(). void TransientSuppressorImpl::UpdateBuffers(float* data) { // TODO(aluebs): Change to ring buffer. memmove(in_buffer_.get(), &in_buffer_[data_length_], @@ -350,9 +350,9 @@ void TransientSuppressorImpl::UpdateBuffers(float* data) { } // Restores the unvoiced signal if a click is present. -// Attenuates by a certain factor every peak in the |fft_buffer_| that exceeds -// the spectral mean. The attenuation depends on |detector_smoothed_|. -// If a restoration takes place, the |magnitudes_| are updated to the new value. +// Attenuates by a certain factor every peak in the `fft_buffer_` that exceeds +// the spectral mean. The attenuation depends on `detector_smoothed_`. +// If a restoration takes place, the `magnitudes_` are updated to the new value. void TransientSuppressorImpl::HardRestoration(float* spectral_mean) { const float detector_result = 1.f - std::pow(1.f - detector_smoothed_, using_reference_ ? 200.f : 50.f); @@ -376,10 +376,10 @@ void TransientSuppressorImpl::HardRestoration(float* spectral_mean) { } // Restores the voiced signal if a click is present. -// Attenuates by a certain factor every peak in the |fft_buffer_| that exceeds +// Attenuates by a certain factor every peak in the `fft_buffer_` that exceeds // the spectral mean and that is lower than some function of the current block -// frequency mean. The attenuation depends on |detector_smoothed_|. -// If a restoration takes place, the |magnitudes_| are updated to the new value. +// frequency mean. The attenuation depends on `detector_smoothed_`. +// If a restoration takes place, the `magnitudes_` are updated to the new value. 
void TransientSuppressorImpl::SoftRestoration(float* spectral_mean) { // Get the spectral magnitude mean of the current block. float block_frequency_mean = 0; diff --git a/modules/audio_processing/transient/transient_suppressor_impl.h b/modules/audio_processing/transient/transient_suppressor_impl.h index 4737af517d..fa8186eed9 100644 --- a/modules/audio_processing/transient/transient_suppressor_impl.h +++ b/modules/audio_processing/transient/transient_suppressor_impl.h @@ -34,22 +34,22 @@ class TransientSuppressorImpl : public TransientSuppressor { int detector_rate_hz, int num_channels) override; - // Processes a |data| chunk, and returns it with keystrokes suppressed from + // Processes a `data` chunk, and returns it with keystrokes suppressed from // it. The float format is assumed to be int16 ranged. If there are more than - // one channel, the chunks are concatenated one after the other in |data|. - // |data_length| must be equal to |data_length_|. - // |num_channels| must be equal to |num_channels_|. - // A sub-band, ideally the higher, can be used as |detection_data|. If it is - // NULL, |data| is used for the detection too. The |detection_data| is always + // one channel, the chunks are concatenated one after the other in `data`. + // `data_length` must be equal to `data_length_`. + // `num_channels` must be equal to `num_channels_`. + // A sub-band, ideally the higher, can be used as `detection_data`. If it is + // NULL, `data` is used for the detection too. The `detection_data` is always // assumed mono. // If a reference signal (e.g. keyboard microphone) is available, it can be - // passed in as |reference_data|. It is assumed mono and must have the same - // length as |data|. NULL is accepted if unavailable. + // passed in as `reference_data`. It is assumed mono and must have the same + // length as `data`. NULL is accepted if unavailable. // This suppressor performs better if voice information is available. - // |voice_probability| is the probability of voice being present in this chunk - // of audio. If voice information is not available, |voice_probability| must + // `voice_probability` is the probability of voice being present in this chunk + // of audio. If voice information is not available, `voice_probability` must // always be set to 1. - // |key_pressed| determines if a key was pressed on this audio chunk. + // `key_pressed` determines if a key was pressed on this audio chunk. // Returns 0 on success and -1 otherwise. int Suppress(float* data, size_t data_length, diff --git a/modules/audio_processing/transient/wpd_node.h b/modules/audio_processing/transient/wpd_node.h index 6a52fb7284..41614fab0f 100644 --- a/modules/audio_processing/transient/wpd_node.h +++ b/modules/audio_processing/transient/wpd_node.h @@ -25,7 +25,7 @@ class WPDNode { WPDNode(size_t length, const float* coefficients, size_t coefficients_length); ~WPDNode(); - // Updates the node data. |parent_data| / 2 must be equals to |length_|. + // Updates the node data. `parent_data` / 2 must be equal to `length_`. // Returns 0 if correct, and -1 otherwise. int Update(const float* parent_data, size_t parent_data_length); diff --git a/modules/audio_processing/transient/wpd_tree.h b/modules/audio_processing/transient/wpd_tree.h index c54220f7fb..13cb8d9c2f 100644 --- a/modules/audio_processing/transient/wpd_tree.h +++ b/modules/audio_processing/transient/wpd_tree.h @@ -65,7 +65,7 @@ class WPDTree { // If level or index are out of bounds the function will return NULL.
WPDNode* NodeAt(int level, int index); - // Updates all the nodes of the tree with the new data. |data_length| must be + // Updates all the nodes of the tree with the new data. `data_length` must be // the same as was used for the creation of the tree. // Returns 0 if correct, and -1 otherwise. int Update(const float* data, size_t data_length); diff --git a/modules/audio_processing/typing_detection.h b/modules/audio_processing/typing_detection.h index d8fb3592c9..9d96583b98 100644 --- a/modules/audio_processing/typing_detection.h +++ b/modules/audio_processing/typing_detection.h @@ -22,7 +22,7 @@ class RTC_EXPORT TypingDetection { // Run the detection algorithm. Shall be called every 10 ms. Returns true if // typing is detected, or false if not, based on the update period as set with - // SetParameters(). See |report_detection_update_period_| description below. + // SetParameters(). See `report_detection_update_period_` description below. bool Process(bool key_pressed, bool vad_activity); // Gets the time in seconds since the last detection. @@ -43,14 +43,14 @@ class RTC_EXPORT TypingDetection { int penalty_counter_; // Counter since last time the detection status reported by Process() was - // updated. See also |report_detection_update_period_|. + // updated. See also `report_detection_update_period_`. int counter_since_last_detection_update_; // The detection status to report. Updated every - // |report_detection_update_period_| call to Process(). + // `report_detection_update_period_` call to Process(). bool detection_to_report_; - // What |detection_to_report_| should be set to next time it is updated. + // What `detection_to_report_` should be set to next time it is updated. bool new_detection_to_report_; // Settable threshold values. @@ -61,10 +61,10 @@ class RTC_EXPORT TypingDetection { // Penalty added for a typing + activity coincidence. int cost_per_typing_; - // Threshold for |penalty_counter_|. + // Threshold for `penalty_counter_`. int reporting_threshold_; - // How much we reduce |penalty_counter_| every 10 ms. + // How much we reduce `penalty_counter_` every 10 ms. int penalty_decay_; // How old typing events we allow.
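Taken together, the typing_detection.h members above describe a leaky accumulator: each 10 ms frame where a key press coincides with voice activity adds `cost_per_typing_`, the counter decays by `penalty_decay_` every frame, and typing is reported while the counter exceeds `reporting_threshold_`. A minimal standalone sketch of that scheme (illustrative constants and update order; the `report_detection_update_period_` batching is omitted, and this is not the WebRTC implementation):

// Leaky-accumulator typing detector, as described by the members above.
// The constants are placeholders, not WebRTC's defaults.
struct TypingPenaltyCounter {
  int penalty_counter = 0;
  int cost_per_typing = 100;      // Added on a typing + activity coincidence.
  int penalty_decay = 1;          // Subtracted every 10 ms frame.
  int reporting_threshold = 300;  // Typing is reported above this level.

  // Call once per 10 ms frame; returns true while typing is detected.
  bool Process(bool key_pressed, bool vad_activity) {
    if (key_pressed && vad_activity)
      penalty_counter += cost_per_typing;
    if (penalty_counter > 0)
      penalty_counter -= penalty_decay;
    return penalty_counter > reporting_threshold;
  }
};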
diff --git a/modules/audio_processing/utility/cascaded_biquad_filter.cc b/modules/audio_processing/utility/cascaded_biquad_filter.cc index 08b9464387..0d236ce0be 100644 --- a/modules/audio_processing/utility/cascaded_biquad_filter.cc +++ b/modules/audio_processing/utility/cascaded_biquad_filter.cc @@ -99,19 +99,28 @@ void CascadedBiQuadFilter::ApplyBiQuad(rtc::ArrayView x, rtc::ArrayView y, CascadedBiQuadFilter::BiQuad* biquad) { RTC_DCHECK_EQ(x.size(), y.size()); - const auto* c_b = biquad->coefficients.b; - const auto* c_a = biquad->coefficients.a; - auto* m_x = biquad->x; - auto* m_y = biquad->y; + const float c_a_0 = biquad->coefficients.a[0]; + const float c_a_1 = biquad->coefficients.a[1]; + const float c_b_0 = biquad->coefficients.b[0]; + const float c_b_1 = biquad->coefficients.b[1]; + const float c_b_2 = biquad->coefficients.b[2]; + float m_x_0 = biquad->x[0]; + float m_x_1 = biquad->x[1]; + float m_y_0 = biquad->y[0]; + float m_y_1 = biquad->y[1]; for (size_t k = 0; k < x.size(); ++k) { const float tmp = x[k]; - y[k] = c_b[0] * tmp + c_b[1] * m_x[0] + c_b[2] * m_x[1] - c_a[0] * m_y[0] - - c_a[1] * m_y[1]; - m_x[1] = m_x[0]; - m_x[0] = tmp; - m_y[1] = m_y[0]; - m_y[0] = y[k]; + y[k] = c_b_0 * tmp + c_b_1 * m_x_0 + c_b_2 * m_x_1 - c_a_0 * m_y_0 - + c_a_1 * m_y_1; + m_x_1 = m_x_0; + m_x_0 = tmp; + m_y_1 = m_y_0; + m_y_0 = y[k]; } + biquad->x[0] = m_x_0; + biquad->x[1] = m_x_1; + biquad->y[0] = m_y_0; + biquad->y[1] = m_y_1; } } // namespace webrtc diff --git a/modules/audio_processing/utility/delay_estimator.cc b/modules/audio_processing/utility/delay_estimator.cc index 73c70b0c34..6868392f6f 100644 --- a/modules/audio_processing/utility/delay_estimator.cc +++ b/modules/audio_processing/utility/delay_estimator.cc @@ -55,7 +55,7 @@ static int BitCount(uint32_t u32) { return ((int)tmp); } -// Compares the |binary_vector| with all rows of the |binary_matrix| and counts +// Compares the `binary_vector` with all rows of the `binary_matrix` and counts // per row the number of times they have the same value. // // Inputs: @@ -74,7 +74,7 @@ static void BitCountComparison(uint32_t binary_vector, int32_t* bit_counts) { int n = 0; - // Compare |binary_vector| with all rows of the |binary_matrix| + // Compare `binary_vector` with all rows of the `binary_matrix` for (; n < matrix_size; n++) { bit_counts[n] = (int32_t)BitCount(binary_vector ^ binary_matrix[n]); } @@ -83,9 +83,9 @@ static void BitCountComparison(uint32_t binary_vector, // Collects necessary statistics for the HistogramBasedValidation(). This // function has to be called prior to calling HistogramBasedValidation(). The // statistics updated and used by the HistogramBasedValidation() are: -// 1. the number of |candidate_hits|, which states for how long we have had the -// same |candidate_delay| -// 2. the |histogram| of candidate delays over time. This histogram is +// 1. the number of `candidate_hits`, which states for how long we have had the +// same `candidate_delay` +// 2. the `histogram` of candidate delays over time. This histogram is // weighted with respect to a reliability measure and time-varying to cope // with possible delay shifts. // For further description see commented code. @@ -93,7 +93,7 @@ static void BitCountComparison(uint32_t binary_vector, // Inputs: // - candidate_delay : The delay to validate. // - valley_depth_q14 : The cost function has a valley/minimum at the -// |candidate_delay| location. |valley_depth_q14| is the +// `candidate_delay` location. 
`valley_depth_q14` is the // cost function difference between the minimum and // maximum locations. The value is in the Q14 domain. // - valley_level_q14 : Is the cost function value at the minimum, in Q14. @@ -109,37 +109,37 @@ static void UpdateRobustValidationStatistics(BinaryDelayEstimator* self, int i = 0; RTC_DCHECK_EQ(self->history_size, self->farend->history_size); - // Reset |candidate_hits| if we have a new candidate. + // Reset `candidate_hits` if we have a new candidate. if (candidate_delay != self->last_candidate_delay) { self->candidate_hits = 0; self->last_candidate_delay = candidate_delay; } self->candidate_hits++; - // The |histogram| is updated differently across the bins. - // 1. The |candidate_delay| histogram bin is increased with the - // |valley_depth|, which is a simple measure of how reliable the - // |candidate_delay| is. The histogram is not increased above - // |kHistogramMax|. + // The `histogram` is updated differently across the bins. + // 1. The `candidate_delay` histogram bin is increased with the + // `valley_depth`, which is a simple measure of how reliable the + // `candidate_delay` is. The histogram is not increased above + // `kHistogramMax`. self->histogram[candidate_delay] += valley_depth; if (self->histogram[candidate_delay] > kHistogramMax) { self->histogram[candidate_delay] = kHistogramMax; } - // 2. The histogram bins in the neighborhood of |candidate_delay| are + // 2. The histogram bins in the neighborhood of `candidate_delay` are // unaffected. The neighborhood is defined as x + {-2, -1, 0, 1}. - // 3. The histogram bins in the neighborhood of |last_delay| are decreased - // with |decrease_in_last_set|. This value equals the difference between - // the cost function values at the locations |candidate_delay| and - // |last_delay| until we reach |max_hits_for_slow_change| consecutive hits - // at the |candidate_delay|. If we exceed this amount of hits the - // |candidate_delay| is a "potential" candidate and we start decreasing - // these histogram bins more rapidly with |valley_depth|. + // 3. The histogram bins in the neighborhood of `last_delay` are decreased + // with `decrease_in_last_set`. This value equals the difference between + // the cost function values at the locations `candidate_delay` and + // `last_delay` until we reach `max_hits_for_slow_change` consecutive hits + // at the `candidate_delay`. If we exceed this amount of hits the + // `candidate_delay` is a "potential" candidate and we start decreasing + // these histogram bins more rapidly with `valley_depth`. if (self->candidate_hits < max_hits_for_slow_change) { decrease_in_last_set = (self->mean_bit_counts[self->compare_delay] - valley_level_q14) * kQ14Scaling; } - // 4. All other bins are decreased with |valley_depth|. + // 4. All other bins are decreased with `valley_depth`. // TODO(bjornv): Investigate how to make this loop more efficient. Split up // the loop? Remove parts that doesn't add too much. for (i = 0; i < self->history_size; ++i) { @@ -157,15 +157,15 @@ static void UpdateRobustValidationStatistics(BinaryDelayEstimator* self, } } -// Validates the |candidate_delay|, estimated in WebRtc_ProcessBinarySpectrum(), +// Validates the `candidate_delay`, estimated in WebRtc_ProcessBinarySpectrum(), // based on a mix of counting concurring hits with a modified histogram // of recent delay estimates. In brief a candidate is valid (returns 1) if it // is the most likely according to the histogram. There are a couple of // exceptions that are worth mentioning: -// 1. 
If the |candidate_delay| < |last_delay| it can be that we are in a +// 1. If the `candidate_delay` < `last_delay` it can be that we are in a // non-causal state, breaking a possible echo control algorithm. Hence, we // open up for a quicker change by allowing the change even if the -// |candidate_delay| is not the most likely one according to the histogram. +// `candidate_delay` is not the most likely one according to the histogram. // 2. There's a minimum number of hits (kMinRequiredHits) and the histogram // value has to have reached a minimum (kMinHistogramThreshold) to be valid. // 3. The action also depends on the filter length used for echo control. @@ -177,7 +177,7 @@ static void UpdateRobustValidationStatistics(BinaryDelayEstimator* self, // - candidate_delay : The delay to validate. // // Return value: -// - is_histogram_valid : 1 - The |candidate_delay| is valid. +// - is_histogram_valid : 1 - The `candidate_delay` is valid. // 0 - Otherwise. static int HistogramBasedValidation(const BinaryDelayEstimator* self, int candidate_delay) { @@ -186,22 +186,22 @@ static int HistogramBasedValidation(const BinaryDelayEstimator* self, const int delay_difference = candidate_delay - self->last_delay; int is_histogram_valid = 0; - // The histogram based validation of |candidate_delay| is done by comparing - // the |histogram| at bin |candidate_delay| with a |histogram_threshold|. - // This |histogram_threshold| equals a |fraction| of the |histogram| at bin - // |last_delay|. The |fraction| is a piecewise linear function of the - // |delay_difference| between the |candidate_delay| and the |last_delay| + // The histogram based validation of `candidate_delay` is done by comparing + // the `histogram` at bin `candidate_delay` with a `histogram_threshold`. + // This `histogram_threshold` equals a `fraction` of the `histogram` at bin + // `last_delay`. The `fraction` is a piecewise linear function of the + // `delay_difference` between the `candidate_delay` and the `last_delay` // allowing for a quicker move if // i) a potential echo control filter cannot handle these large differences. - // ii) keeping |last_delay| instead of updating to |candidate_delay| could + // ii) keeping `last_delay` instead of updating to `candidate_delay` could // force an echo control into a non-causal state. // We further require the histogram to have reached a minimum value of - // |kMinHistogramThreshold|. In addition, we also require the number of - // |candidate_hits| to be more than |kMinRequiredHits| to remove spurious + // `kMinHistogramThreshold`. In addition, we also require the number of + // `candidate_hits` to be more than `kMinRequiredHits` to remove spurious // values. - // Calculate a comparison histogram value (|histogram_threshold|) that is - // depending on the distance between the |candidate_delay| and |last_delay|. + // Calculate a comparison histogram value (`histogram_threshold`) that + // depends on the distance between the `candidate_delay` and `last_delay`. // TODO(bjornv): How much can we gain by turning the fraction calculation // into tables? if (delay_difference > self->allowed_offset) { @@ -226,9 +226,9 @@ static int HistogramBasedValidation(const BinaryDelayEstimator* self, return is_histogram_valid; } -// Performs a robust validation of the |candidate_delay| estimated in +// Performs a robust validation of the `candidate_delay` estimated in // WebRtc_ProcessBinarySpectrum().
The algorithm takes the -// |is_instantaneous_valid| and the |is_histogram_valid| and combines them +// `is_instantaneous_valid` and the `is_histogram_valid` and combines them // into a robust validation. The HistogramBasedValidation() has to be called // prior to this call. // For further description on how the combination is done, see commented code. @@ -250,18 +250,18 @@ static int RobustValidation(const BinaryDelayEstimator* self, int is_robust = 0; // The final robust validation is based on the two algorithms; 1) the - // |is_instantaneous_valid| and 2) the histogram based with result stored in - // |is_histogram_valid|. - // i) Before we actually have a valid estimate (|last_delay| == -2), we say + // `is_instantaneous_valid` and 2) the histogram based with result stored in + // `is_histogram_valid`. + // i) Before we actually have a valid estimate (`last_delay` == -2), we say // a candidate is valid if either algorithm states so - // (|is_instantaneous_valid| OR |is_histogram_valid|). + // (`is_instantaneous_valid` OR `is_histogram_valid`). is_robust = (self->last_delay < 0) && (is_instantaneous_valid || is_histogram_valid); // ii) Otherwise, we need both algorithms to be certain - // (|is_instantaneous_valid| AND |is_histogram_valid|) + // (`is_instantaneous_valid` AND `is_histogram_valid`) is_robust |= is_instantaneous_valid && is_histogram_valid; // iii) With one exception, i.e., the histogram based algorithm can overrule - // the instantaneous one if |is_histogram_valid| = 1 and the histogram + // the instantaneous one if `is_histogram_valid` = 1 and the histogram // is significantly strong. is_robust |= is_histogram_valid && (self->histogram[candidate_delay] > self->last_delay_histogram); @@ -373,13 +373,13 @@ void WebRtc_SoftResetBinaryDelayEstimatorFarend( void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* handle, uint32_t binary_far_spectrum) { RTC_DCHECK(handle); - // Shift binary spectrum history and insert current |binary_far_spectrum|. + // Shift binary spectrum history and insert current `binary_far_spectrum`. memmove(&(handle->binary_far_history[1]), &(handle->binary_far_history[0]), (handle->history_size - 1) * sizeof(uint32_t)); handle->binary_far_history[0] = binary_far_spectrum; // Shift history of far-end binary spectrum bit counts and insert bit count - // of current |binary_far_spectrum|. + // of current `binary_far_spectrum`. memmove(&(handle->far_bit_counts[1]), &(handle->far_bit_counts[0]), (handle->history_size - 1) * sizeof(int)); handle->far_bit_counts[0] = BitCount(binary_far_spectrum); @@ -402,7 +402,7 @@ void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self) { free(self->histogram); self->histogram = NULL; - // BinaryDelayEstimator does not have ownership of |farend|, hence we do not + // BinaryDelayEstimator does not have ownership of `farend`, hence we do not // free the memory here. That should be handled separately by the user. self->farend = NULL; @@ -454,8 +454,8 @@ int WebRtc_AllocateHistoryBufferMemory(BinaryDelayEstimator* self, // Only update far-end buffers if we need. history_size = WebRtc_AllocateFarendBufferMemory(far, history_size); } - // The extra array element in |mean_bit_counts| and |histogram| is a dummy - // element only used while |last_delay| == -2, i.e., before we have a valid + // The extra array element in `mean_bit_counts` and `histogram` is a dummy + // element only used while `last_delay` == -2, i.e., before we have a valid // estimate. 
self->mean_bit_counts = static_cast( realloc(self->mean_bit_counts, @@ -539,36 +539,36 @@ int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self, } if (self->near_history_size > 1) { // If we apply lookahead, shift near-end binary spectrum history. Insert - // current |binary_near_spectrum| and pull out the delayed one. + // current `binary_near_spectrum` and pull out the delayed one. memmove(&(self->binary_near_history[1]), &(self->binary_near_history[0]), (self->near_history_size - 1) * sizeof(uint32_t)); self->binary_near_history[0] = binary_near_spectrum; binary_near_spectrum = self->binary_near_history[self->lookahead]; } - // Compare with delayed spectra and store the |bit_counts| for each delay. + // Compare with delayed spectra and store the `bit_counts` for each delay. BitCountComparison(binary_near_spectrum, self->farend->binary_far_history, self->history_size, self->bit_counts); - // Update |mean_bit_counts|, which is the smoothed version of |bit_counts|. + // Update `mean_bit_counts`, which is the smoothed version of `bit_counts`. for (i = 0; i < self->history_size; i++) { - // |bit_counts| is constrained to [0, 32], meaning we can smooth with a + // `bit_counts` is constrained to [0, 32], meaning we can smooth with a // factor up to 2^26. We use Q9. int32_t bit_count = (self->bit_counts[i] << 9); // Q9. - // Update |mean_bit_counts| only when far-end signal has something to - // contribute. If |far_bit_counts| is zero the far-end signal is weak and + // Update `mean_bit_counts` only when far-end signal has something to + // contribute. If `far_bit_counts` is zero the far-end signal is weak and // we likely have a poor echo condition, hence don't update. if (self->farend->far_bit_counts[i] > 0) { - // Make number of right shifts piecewise linear w.r.t. |far_bit_counts|. + // Make number of right shifts piecewise linear w.r.t. `far_bit_counts`. int shifts = kShiftsAtZero; shifts -= (kShiftsLinearSlope * self->farend->far_bit_counts[i]) >> 4; WebRtc_MeanEstimatorFix(bit_count, shifts, &(self->mean_bit_counts[i])); } } - // Find |candidate_delay|, |value_best_candidate| and |value_worst_candidate| - // of |mean_bit_counts|. + // Find `candidate_delay`, `value_best_candidate` and `value_worst_candidate` + // of `mean_bit_counts`. for (i = 0; i < self->history_size; i++) { if (self->mean_bit_counts[i] < value_best_candidate) { value_best_candidate = self->mean_bit_counts[i]; @@ -580,25 +580,25 @@ int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self, } valley_depth = value_worst_candidate - value_best_candidate; - // The |value_best_candidate| is a good indicator on the probability of - // |candidate_delay| being an accurate delay (a small |value_best_candidate| + // The `value_best_candidate` is a good indicator on the probability of + // `candidate_delay` being an accurate delay (a small `value_best_candidate` // means a good binary match). In the following sections we make a decision - // whether to update |last_delay| or not. + // whether to update `last_delay` or not. // 1) If the difference bit counts between the best and the worst delay // candidates is too small we consider the situation to be unreliable and - // don't update |last_delay|. - // 2) If the situation is reliable we update |last_delay| if the value of the + // don't update `last_delay`. 
+ // 2) If the situation is reliable we update `last_delay` if the value of the // best candidate delay has a value less than - // i) an adaptive threshold |minimum_probability|, or - // ii) this corresponding value |last_delay_probability|, but updated at + // i) an adaptive threshold `minimum_probability`, or + // ii) this corresponding value `last_delay_probability`, but updated at // this time instant. - // Update |minimum_probability|. + // Update `minimum_probability`. if ((self->minimum_probability > kProbabilityLowerLimit) && (valley_depth > kProbabilityMinSpread)) { // The "hard" threshold can't be lower than 17 (in Q9). // The valley in the curve also has to be distinct, i.e., the - // difference between |value_worst_candidate| and |value_best_candidate| has + // difference between `value_worst_candidate` and `value_best_candidate` has // to be large enough. int32_t threshold = value_best_candidate + kProbabilityOffset; if (threshold < kProbabilityLowerLimit) { @@ -608,17 +608,17 @@ int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self, self->minimum_probability = threshold; } } - // Update |last_delay_probability|. + // Update `last_delay_probability`. // We use a Markov type model, i.e., a slowly increasing level over time. self->last_delay_probability++; - // Validate |candidate_delay|. We have a reliable instantaneous delay + // Validate `candidate_delay`. We have a reliable instantaneous delay // estimate if - // 1) The valley is distinct enough (|valley_depth| > |kProbabilityOffset|) + // 1) The valley is distinct enough (`valley_depth` > `kProbabilityOffset`) // and // 2) The depth of the valley is deep enough - // (|value_best_candidate| < |minimum_probability|) + // (`value_best_candidate` < `minimum_probability`) // and deeper than the best estimate so far - // (|value_best_candidate| < |last_delay_probability|) + // (`value_best_candidate` < `last_delay_probability`) valid_candidate = ((valley_depth > kProbabilityOffset) && ((value_best_candidate < self->minimum_probability) || (value_best_candidate < self->last_delay_probability))); @@ -650,7 +650,7 @@ int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self, (self->histogram[candidate_delay] > kLastHistogramMax ? kLastHistogramMax : self->histogram[candidate_delay]); - // Adjust the histogram if we made a change to |last_delay|, though it was + // Adjust the histogram if we made a change to `last_delay`, though it was // not the most likely one according to the histogram. if (self->histogram[candidate_delay] < self->histogram[self->compare_delay]) { @@ -680,7 +680,7 @@ float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self) { // Simply a linear function of the histogram height at delay estimate. quality = self->histogram[self->compare_delay] / kHistogramMax; } else { - // Note that |last_delay_probability| states how deep the minimum of the + // Note that `last_delay_probability` states how deep the minimum of the // cost function is, so it is rather an error probability. quality = (float)(kMaxBitCountsQ9 - self->last_delay_probability) / kMaxBitCountsQ9; diff --git a/modules/audio_processing/utility/delay_estimator.h b/modules/audio_processing/utility/delay_estimator.h index df281bcfdb..b6fc36a759 100644 --- a/modules/audio_processing/utility/delay_estimator.h +++ b/modules/audio_processing/utility/delay_estimator.h @@ -81,7 +81,7 @@ void WebRtc_FreeBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self); // // Return value: // - BinaryDelayEstimatorFarend* -// : Created |handle|. 
If the memory can't be allocated +// : Created `handle`. If the memory can't be allocated // or if any of the input parameters are invalid NULL // is returned. // @@ -159,7 +159,7 @@ BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator( BinaryDelayEstimatorFarend* farend, int max_lookahead); -// Re-allocates |history_size| dependent buffers. The far-end buffers will be +// Re-allocates `history_size` dependent buffers. The far-end buffers will be // updated at the same time if needed. // // Input: @@ -237,7 +237,7 @@ int WebRtc_binary_last_delay(BinaryDelayEstimator* self); // delay value. float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self); -// Updates the |mean_value| recursively with a step size of 2^-|factor|. This +// Updates the `mean_value` recursively with a step size of 2^-`factor`. This // function is used internally in the Binary Delay Estimator as well as the // Fixed point wrapper. // diff --git a/modules/audio_processing/utility/delay_estimator_internal.h b/modules/audio_processing/utility/delay_estimator_internal.h index fce95d80d7..891e20027d 100644 --- a/modules/audio_processing/utility/delay_estimator_internal.h +++ b/modules/audio_processing/utility/delay_estimator_internal.h @@ -25,7 +25,7 @@ typedef union { typedef struct { // Pointers to mean values of spectrum. SpectrumType* mean_far_spectrum; - // |mean_far_spectrum| initialization indicator. + // `mean_far_spectrum` initialization indicator. int far_spectrum_initialized; int spectrum_size; @@ -37,7 +37,7 @@ typedef struct { typedef struct { // Pointers to mean values of spectrum. SpectrumType* mean_near_spectrum; - // |mean_near_spectrum| initialization indicator. + // `mean_near_spectrum` initialization indicator. int near_spectrum_initialized; int spectrum_size; diff --git a/modules/audio_processing/utility/delay_estimator_unittest.cc b/modules/audio_processing/utility/delay_estimator_unittest.cc index 65d8e147fd..651d836c82 100644 --- a/modules/audio_processing/utility/delay_estimator_unittest.cc +++ b/modules/audio_processing/utility/delay_estimator_unittest.cc @@ -80,7 +80,7 @@ DelayEstimatorTest::DelayEstimatorTest() memset(far_u16_, 1, sizeof(far_u16_)); memset(near_u16_, 2, sizeof(near_u16_)); // Construct a sequence of binary spectra used to verify delay estimate. The - // |kSequenceLength| has to be long enough for the delay estimation to leave + // `kSequenceLength` has to be long enough for the delay estimation to leave // the initialized state. binary_spectrum_[0] = 1; for (int i = 1; i < (kSequenceLength + kHistorySize); i++) { @@ -132,7 +132,7 @@ void DelayEstimatorTest::InitBinary() { // Initialize Binary Delay Estimator WebRtc_InitBinaryDelayEstimator(binary_); // Verify initialization. This does not guarantee a complete check, since - // |last_delay| may be equal to -2 before initialization if done on the fly. + // `last_delay` may be equal to -2 before initialization if done on the fly. EXPECT_EQ(-2, binary_->last_delay); } @@ -144,7 +144,7 @@ void DelayEstimatorTest::VerifyDelay(BinaryDelayEstimator* binary_handle, if (delay != -2) { // Verify correct delay estimate. In the non-causal case the true delay - // is equivalent with the |offset|. + // is equivalent with the `offset`. EXPECT_EQ(offset, delay); } } @@ -160,7 +160,7 @@ void DelayEstimatorTest::RunBinarySpectra(BinaryDelayEstimator* binary1, WebRtc_InitBinaryDelayEstimator(binary1); WebRtc_InitBinaryDelayEstimator(binary2); // Verify initialization. 
This does not guarantee a complete check, since - // |last_delay| may be equal to -2 before initialization if done on the fly. + // `last_delay` may be equal to -2 before initialization if done on the fly. EXPECT_EQ(-2, binary1->last_delay); EXPECT_EQ(-2, binary2->last_delay); for (int i = kLookahead; i < (kSequenceLength + kLookahead); i++) { @@ -174,12 +174,12 @@ void DelayEstimatorTest::RunBinarySpectra(BinaryDelayEstimator* binary1, VerifyDelay(binary2, far_offset + kLookahead + lookahead_offset + near_offset, delay_2); - // Expect the two delay estimates to be offset by |lookahead_offset| + - // |near_offset| when we have left the initial state. + // Expect the two delay estimates to be offset by `lookahead_offset` + + // `near_offset` when we have left the initial state. if ((delay_1 != -2) && (delay_2 != -2)) { EXPECT_EQ(delay_1, delay_2 - lookahead_offset - near_offset); } - // For the case of identical signals |delay_1| and |delay_2| should match + // For the case of identical signals `delay_1` and `delay_2` should match // all the time, unless one of them has robust validation turned on. In // that case the robust validation leaves the initial state faster. if ((near_offset == 0) && (lookahead_offset == 0)) { @@ -208,8 +208,8 @@ void DelayEstimatorTest::RunBinarySpectraTest(int near_offset, BinaryDelayEstimator* binary2 = WebRtc_CreateBinaryDelayEstimator( binary_farend_, kLookahead + lookahead_offset); // Verify the delay for both causal and non-causal systems. For causal systems - // the delay is equivalent with a positive |offset| of the far-end sequence. - // For non-causal systems the delay is equivalent with a negative |offset| of + // the delay is equivalent with a positive `offset` of the far-end sequence. + // For non-causal systems the delay is equivalent with a negative `offset` of // the far-end sequence. binary_->robust_validation_enabled = ref_robust_validation; binary2->robust_validation_enabled = robust_validation; @@ -242,23 +242,23 @@ TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) { EXPECT_TRUE(handle == NULL); // WebRtc_InitDelayEstimatorFarend() and WebRtc_InitDelayEstimator() should - // return -1 if we have a NULL pointer as |handle|. + // return -1 if we have a NULL pointer as `handle`. EXPECT_EQ(-1, WebRtc_InitDelayEstimatorFarend(NULL)); EXPECT_EQ(-1, WebRtc_InitDelayEstimator(NULL)); // WebRtc_AddFarSpectrumFloat() should return -1 if we have: - // 1) NULL pointer as |handle|. + // 1) NULL pointer as `handle`. // 2) NULL pointer as far-end spectrum. // 3) Incorrect spectrum size. EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(NULL, far_f_, spectrum_size_)); - // Use |farend_handle_| which is properly created at SetUp(). + // Use `farend_handle_` which is properly created at SetUp(). EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(farend_handle_, NULL, spectrum_size_)); EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_, spectrum_size_ + 1)); // WebRtc_AddFarSpectrumFix() should return -1 if we have: - // 1) NULL pointer as |handle|. + // 1) NULL pointer as `handle`. // 2) NULL pointer as far-end spectrum. // 3) Incorrect spectrum size. // 4) Too high precision in far-end spectrum (Q-domain > 15). @@ -271,8 +271,8 @@ TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) { spectrum_size_, 16)); // WebRtc_set_history_size() should return -1 if: - // 1) |handle| is a NULL. - // 2) |history_size| <= 1. + // 1) `handle` is a NULL. + // 2) `history_size` <= 1. 
EXPECT_EQ(-1, WebRtc_set_history_size(NULL, 1)); EXPECT_EQ(-1, WebRtc_set_history_size(handle_, 1)); // WebRtc_history_size() should return -1 if: @@ -293,43 +293,43 @@ TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) { EXPECT_EQ(-1, WebRtc_set_lookahead(handle_, -1)); // WebRtc_set_allowed_offset() should return -1 if we have: - // 1) NULL pointer as |handle|. - // 2) |allowed_offset| < 0. + // 1) NULL pointer as `handle`. + // 2) `allowed_offset` < 0. EXPECT_EQ(-1, WebRtc_set_allowed_offset(NULL, 0)); EXPECT_EQ(-1, WebRtc_set_allowed_offset(handle_, -1)); EXPECT_EQ(-1, WebRtc_get_allowed_offset(NULL)); // WebRtc_enable_robust_validation() should return -1 if we have: - // 1) NULL pointer as |handle|. - // 2) Incorrect |enable| value (not 0 or 1). + // 1) NULL pointer as `handle`. + // 2) Incorrect `enable` value (not 0 or 1). EXPECT_EQ(-1, WebRtc_enable_robust_validation(NULL, kEnable[0])); EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, -1)); EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, 2)); // WebRtc_is_robust_validation_enabled() should return -1 if we have NULL - // pointer as |handle|. + // pointer as `handle`. EXPECT_EQ(-1, WebRtc_is_robust_validation_enabled(NULL)); // WebRtc_DelayEstimatorProcessFloat() should return -1 if we have: - // 1) NULL pointer as |handle|. + // 1) NULL pointer as `handle`. // 2) NULL pointer as near-end spectrum. // 3) Incorrect spectrum size. // 4) Non matching history sizes if multiple delay estimators using the same // far-end reference. EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(NULL, near_f_, spectrum_size_)); - // Use |handle_| which is properly created at SetUp(). + // Use `handle_` which is properly created at SetUp(). EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(handle_, NULL, spectrum_size_)); EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(handle_, near_f_, spectrum_size_ + 1)); - // |tmp_handle| is already in a non-matching state. + // `tmp_handle` is already in a non-matching state. EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(tmp_handle, near_f_, spectrum_size_)); // WebRtc_DelayEstimatorProcessFix() should return -1 if we have: - // 1) NULL pointer as |handle|. + // 1) NULL pointer as `handle`. // 2) NULL pointer as near-end spectrum. // 3) Incorrect spectrum size. // 4) Too high precision in near-end spectrum (Q-domain > 15). @@ -343,12 +343,12 @@ TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) { spectrum_size_ + 1, 0)); EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_, spectrum_size_, 16)); - // |tmp_handle| is already in a non-matching state. + // `tmp_handle` is already in a non-matching state. EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(tmp_handle, near_u16_, spectrum_size_, 0)); WebRtc_FreeDelayEstimator(tmp_handle); - // WebRtc_last_delay() should return -1 if we have a NULL pointer as |handle|. + // WebRtc_last_delay() should return -1 if we have a NULL pointer as `handle`. EXPECT_EQ(-1, WebRtc_last_delay(NULL)); // Free any local memory if needed. @@ -422,7 +422,7 @@ TEST_F(DelayEstimatorTest, InitializedSpectrumAfterProcess) { TEST_F(DelayEstimatorTest, CorrectLastDelay) { // In this test we verify that we get the correct last delay upon valid call. // We simply process the same data until we leave the initialized state - // (|last_delay| = -2). Then we compare the Process() output with the + // (`last_delay` = -2). Then we compare the Process() output with the // last_delay() call. // TODO(bjornv): Update quality values for robust validation. 
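The MeanEstimatorFix test further down exercises the smoothing contract stated in delay_estimator.h: the mean is updated recursively toward the new value with a step size of 2^-`factor`. A minimal fixed-point sketch of that contract (hypothetical helper name, not the shipped implementation) is:

#include <stdint.h>

// Moves `mean_value` toward `new_value` by 2^-`factor` of the remaining gap.
// The shift rounds toward zero so the mean converges from both directions.
static void MeanEstimatorFixSketch(int32_t new_value,
                                   int factor,
                                   int32_t* mean_value) {
  int32_t diff = new_value - *mean_value;
  diff = diff < 0 ? -((-diff) >> factor) : (diff >> factor);
  *mean_value += diff;
}

With factor = 10, as in the unit test below, each call closes roughly 1/1024 of the gap, which is why the test can assert both EXPECT_LT(mean_value_before, mean_value) and EXPECT_GT(new_mean_value, mean_value).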
@@ -488,8 +488,8 @@ TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfBinaryEstimator) { BinaryDelayEstimator* binary_handle = binary_; // WebRtc_CreateBinaryDelayEstimator() should return -1 if we have a NULL - // pointer as |binary_farend| or invalid input values. Upon failure, the - // |binary_handle| should be NULL. + // pointer as `binary_farend` or invalid input values. Upon failure, the + // `binary_handle` should be NULL. // Make sure we have a non-NULL value at start, so we can detect NULL after // create failure. binary_handle = WebRtc_CreateBinaryDelayEstimator(NULL, kLookahead); @@ -506,12 +506,12 @@ TEST_F(DelayEstimatorTest, MeanEstimatorFix) { int32_t mean_value_before = mean_value; int32_t new_mean_value = mean_value * 2; - // Increasing |mean_value|. + // Increasing `mean_value`. WebRtc_MeanEstimatorFix(new_mean_value, 10, &mean_value); EXPECT_LT(mean_value_before, mean_value); EXPECT_GT(new_mean_value, mean_value); - // Decreasing |mean_value|. + // Decreasing `mean_value`. new_mean_value = mean_value / 2; mean_value_before = mean_value; WebRtc_MeanEstimatorFix(new_mean_value, 10, &mean_value); @@ -569,7 +569,7 @@ TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearDifferentLookahead) { TEST_F(DelayEstimatorTest, AllowedOffsetNoImpactWhenRobustValidationDisabled) { // The same setup as in ExactDelayEstimateMultipleNearSameSpectrum with the - // difference that |allowed_offset| is set for the reference binary delay + // difference that `allowed_offset` is set for the reference binary delay // estimator. binary_->allowed_offset = 10; diff --git a/modules/audio_processing/utility/delay_estimator_wrapper.cc b/modules/audio_processing/utility/delay_estimator_wrapper.cc index 8eac2f6974..521a8a0997 100644 --- a/modules/audio_processing/utility/delay_estimator_wrapper.cc +++ b/modules/audio_processing/utility/delay_estimator_wrapper.cc @@ -19,8 +19,8 @@ namespace webrtc { -// Only bit |kBandFirst| through bit |kBandLast| are processed and -// |kBandFirst| - |kBandLast| must be < 32. +// Only bit `kBandFirst` through bit `kBandLast` are processed and +// `kBandFirst` - `kBandLast` must be < 32. enum { kBandFirst = 12 }; enum { kBandLast = 43 }; @@ -48,8 +48,8 @@ static void MeanEstimatorFloat(float new_value, *mean_value += (new_value - *mean_value) * scale; } -// Computes the binary spectrum by comparing the input |spectrum| with a -// |threshold_spectrum|. Float and fixed point versions. +// Computes the binary spectrum by comparing the input `spectrum` with a +// `threshold_spectrum`. Float and fixed point versions. // // Inputs: // - spectrum : Spectrum of which the binary spectrum should be @@ -69,11 +69,11 @@ static uint32_t BinarySpectrumFix(const uint16_t* spectrum, RTC_DCHECK_LT(q_domain, 16); if (!(*threshold_initialized)) { - // Set the |threshold_spectrum| to half the input |spectrum| as starting + // Set the `threshold_spectrum` to half the input `spectrum` as starting // value. This speeds up the convergence. for (i = kBandFirst; i <= kBandLast; i++) { if (spectrum[i] > 0) { - // Convert input spectrum from Q(|q_domain|) to Q15. + // Convert input spectrum from Q(`q_domain`) to Q15. int32_t spectrum_q15 = ((int32_t)spectrum[i]) << (15 - q_domain); threshold_spectrum[i].int32_ = (spectrum_q15 >> 1); *threshold_initialized = 1; @@ -81,11 +81,11 @@ static uint32_t BinarySpectrumFix(const uint16_t* spectrum, } } for (i = kBandFirst; i <= kBandLast; i++) { - // Convert input spectrum from Q(|q_domain|) to Q15. + // Convert input spectrum from Q(`q_domain`) to Q15. 
int32_t spectrum_q15 = ((int32_t)spectrum[i]) << (15 - q_domain); - // Update the |threshold_spectrum|. + // Update the `threshold_spectrum`. WebRtc_MeanEstimatorFix(spectrum_q15, 6, &(threshold_spectrum[i].int32_)); - // Convert |spectrum| at current frequency bin to a binary value. + // Convert `spectrum` at current frequency bin to a binary value. if (spectrum_q15 > threshold_spectrum[i].int32_) { out = SetBit(out, i - kBandFirst); } @@ -102,7 +102,7 @@ static uint32_t BinarySpectrumFloat(const float* spectrum, const float kScale = 1 / 64.0; if (!(*threshold_initialized)) { - // Set the |threshold_spectrum| to half the input |spectrum| as starting + // Set the `threshold_spectrum` to half the input `spectrum` as starting // value. This speeds up the convergence. for (i = kBandFirst; i <= kBandLast; i++) { if (spectrum[i] > 0.0f) { @@ -113,9 +113,9 @@ static uint32_t BinarySpectrumFloat(const float* spectrum, } for (i = kBandFirst; i <= kBandLast; i++) { - // Update the |threshold_spectrum|. + // Update the `threshold_spectrum`. MeanEstimatorFloat(spectrum[i], kScale, &(threshold_spectrum[i].float_)); - // Convert |spectrum| at current frequency bin to a binary value. + // Convert `spectrum` at current frequency bin to a binary value. if (spectrum[i] > threshold_spectrum[i].float_) { out = SetBit(out, i - kBandFirst); } @@ -219,7 +219,7 @@ int WebRtc_AddFarSpectrumFix(void* handle, return -1; } if (far_q > 15) { - // If |far_q| is larger than 15 we cannot guarantee no wrap around. + // If `far_q` is larger than 15 we cannot guarantee no wrap around. return -1; } @@ -433,7 +433,7 @@ int WebRtc_DelayEstimatorProcessFix(void* handle, return -1; } if (near_q > 15) { - // If |near_q| is larger than 15 we cannot guarantee no wrap around. + // If `near_q` is larger than 15 we cannot guarantee no wrap around. return -1; } diff --git a/modules/audio_processing/utility/delay_estimator_wrapper.h b/modules/audio_processing/utility/delay_estimator_wrapper.h index dbcafaf013..a90cbe31cb 100644 --- a/modules/audio_processing/utility/delay_estimator_wrapper.h +++ b/modules/audio_processing/utility/delay_estimator_wrapper.h @@ -35,7 +35,7 @@ void WebRtc_FreeDelayEstimatorFarend(void* handle); // determined together with WebRtc_set_lookahead(). // // Return value: -// - void* : Created |handle|. If the memory can't be allocated or +// - void* : Created `handle`. If the memory can't be allocated or // if any of the input parameters are invalid NULL is // returned. void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size); @@ -85,13 +85,13 @@ void WebRtc_FreeDelayEstimator(void* handle); // WebRtc_CreateDelayEstimatorFarend(). // // Note that WebRtc_CreateDelayEstimator does not take -// ownership of |farend_handle|, which has to be torn +// ownership of `farend_handle`, which has to be torn // down properly after this instance. // // - max_lookahead : Maximum amount of non-causal lookahead allowed. The // actual amount of lookahead used can be controlled by -// WebRtc_set_lookahead(...). The default |lookahead| is -// set to |max_lookahead| at create time. Use +// WebRtc_set_lookahead(...). The default `lookahead` is +// set to `max_lookahead` at create time. Use // WebRtc_set_lookahead(...) before start if a different // value is desired. // @@ -106,12 +106,12 @@ void WebRtc_FreeDelayEstimator(void* handle); // estimated. // // Note that the effective range of delay estimates is -// [-|lookahead|,... 
,|history_size|-|lookahead|) -// where |history_size| is set through +// [-`lookahead`,... ,`history_size`-`lookahead`) +// where `history_size` is set through // WebRtc_set_history_size(). // // Return value: -// - void* : Created |handle|. If the memory can't be allocated or +// - void* : Created `handle`. If the memory can't be allocated or // if any of the input parameters are invalid NULL is // returned. void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead); @@ -129,12 +129,12 @@ int WebRtc_InitDelayEstimator(void* handle); // - actual_shifts : The actual number of shifts performed. int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift); -// Sets the effective |history_size| used. Valid values from 2. We simply need -// at least two delays to compare to perform an estimate. If |history_size| is +// Sets the effective `history_size` used. Valid values from 2. We simply need +// at least two delays to compare to perform an estimate. If `history_size` is // changed, buffers are reallocated filling in with zeros if necessary. -// Note that changing the |history_size| affects both buffers in far-end and +// Note that changing the `history_size` affects both buffers in far-end and // near-end. Hence it is important to change all DelayEstimators that use the -// same reference far-end, to the same |history_size| value. +// same reference far-end, to the same `history_size` value. // Inputs: // - handle : Pointer to the delay estimation instance. // - history_size : Effective history size to be used. @@ -148,8 +148,8 @@ int WebRtc_set_history_size(void* handle, int history_size); // - handle : Pointer to the delay estimation instance. int WebRtc_history_size(const void* handle); -// Sets the amount of |lookahead| to use. Valid values are [0, max_lookahead] -// where |max_lookahead| was set at create time through +// Sets the amount of `lookahead` to use. Valid values are [0, max_lookahead] +// where `max_lookahead` was set at create time through // WebRtc_CreateDelayEstimator(...). // // Input: @@ -157,8 +157,8 @@ int WebRtc_history_size(const void* handle); // - lookahead : The amount of lookahead to be used. // // Return value: -// - new_lookahead : The actual amount of lookahead set, unless |handle| is -// a NULL pointer or |lookahead| is invalid, for which an +// - new_lookahead : The actual amount of lookahead set, unless `handle` is +// a NULL pointer or `lookahead` is invalid, for which an // error is returned. int WebRtc_set_lookahead(void* handle, int lookahead); @@ -167,12 +167,12 @@ int WebRtc_set_lookahead(void* handle, int lookahead); // - handle : Pointer to the delay estimation instance. int WebRtc_lookahead(void* handle); -// Sets the |allowed_offset| used in the robust validation scheme. If the +// Sets the `allowed_offset` used in the robust validation scheme. If the // delay estimator is used in an echo control component, this parameter is -// related to the filter length. In principle |allowed_offset| should be set to +// related to the filter length. In principle `allowed_offset` should be set to // the echo control filter length minus the expected echo duration, i.e., the // delay offset the echo control can handle without quality regression. The -// default value, used if not set manually, is zero. Note that |allowed_offset| +// default value, used if not set manually, is zero. Note that `allowed_offset` // has to be non-negative. // Inputs: // - handle : Pointer to the delay estimation instance. 
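The wrapper functions documented above form a small C-style API around the binary estimator. A hedged usage sketch of the float path (function names and signatures are taken from this header; the spectrum size, history size, lookahead, and allowed offset values are illustrative assumptions):

#include "modules/audio_processing/utility/delay_estimator_wrapper.h"

void DelayEstimatorUsageSketch(const float* far_spectrum,
                               const float* near_spectrum,
                               int spectrum_size) {
  void* farend =
      WebRtc_CreateDelayEstimatorFarend(spectrum_size, /*history_size=*/100);
  void* estimator = WebRtc_CreateDelayEstimator(farend, /*max_lookahead=*/15);
  WebRtc_InitDelayEstimatorFarend(farend);
  WebRtc_InitDelayEstimator(estimator);
  WebRtc_set_lookahead(estimator, 5);        // Valid range: [0, max_lookahead].
  WebRtc_set_allowed_offset(estimator, 10);  // Must be non-negative.
  // Per 10 ms block: feed the far-end spectrum first, then process near-end.
  WebRtc_AddFarSpectrumFloat(farend, far_spectrum, spectrum_size);
  int delay = WebRtc_DelayEstimatorProcessFloat(estimator, near_spectrum,
                                                spectrum_size);
  // `delay` lies in [-lookahead, history_size - lookahead); it is -2 while
  // the estimator has not yet left its initialized state.
  (void)delay;
  WebRtc_FreeDelayEstimator(estimator);
  // The estimator does not own the far-end handle; free it separately.
  WebRtc_FreeDelayEstimatorFarend(farend);
}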
@@ -180,7 +180,7 @@ int WebRtc_lookahead(void* handle);
 //                    the echo control filter can handle.
 int WebRtc_set_allowed_offset(void* handle, int allowed_offset);
 
-// Returns the |allowed_offset| in number of partitions.
+// Returns the `allowed_offset` in number of partitions.
 int WebRtc_get_allowed_offset(const void* handle);
 
 // Enables/Disables a robust validation functionality in the delay estimation.
diff --git a/modules/audio_processing/utility/pffft_wrapper.h b/modules/audio_processing/utility/pffft_wrapper.h
index 160f0da059..983c2fd1bc 100644
--- a/modules/audio_processing/utility/pffft_wrapper.h
+++ b/modules/audio_processing/utility/pffft_wrapper.h
@@ -51,7 +51,7 @@ class Pffft {
   // TODO(https://crbug.com/webrtc/9577): Consider adding a factory and making
   // the ctor private.
   // static std::unique_ptr<Pffft> Create(size_t fft_size,
-  // FftType fft_type); Ctor. |fft_size| must be a supported size (see
+  // FftType fft_type); Ctor. `fft_size` must be a supported size (see
   // Pffft::IsValidFftSize()). If not supported, the code will crash.
   Pffft(size_t fft_size, FftType fft_type);
   Pffft(const Pffft&) = delete;
@@ -73,9 +73,9 @@ class Pffft {
   // Computes the backward fast Fourier transform.
   void BackwardTransform(const FloatBuffer& in, FloatBuffer* out, bool ordered);
 
-  // Multiplies the frequency components of |fft_x| and |fft_y| and accumulates
-  // them into |out|. The arrays must have been obtained with
-  // ForwardTransform(..., /*ordered=*/false) - i.e., |fft_x| and |fft_y| must
+  // Multiplies the frequency components of `fft_x` and `fft_y` and accumulates
+  // them into `out`. The arrays must have been obtained with
+  // ForwardTransform(..., /*ordered=*/false) - i.e., `fft_x` and `fft_y` must
   // not be ordered.
   void FrequencyDomainConvolve(const FloatBuffer& fft_x,
                                const FloatBuffer& fft_y,
diff --git a/modules/audio_processing/vad/gmm.h b/modules/audio_processing/vad/gmm.h
index 93eb675c46..d9d68ecfdc 100644
--- a/modules/audio_processing/vad/gmm.h
+++ b/modules/audio_processing/vad/gmm.h
@@ -20,13 +20,13 @@ namespace webrtc {
 // Where a 'mixture' is a Gaussian density.
 
 struct GmmParameters {
-  // weight[n] = log(w[n]) - |dimension|/2 * log(2*pi) - 1/2 * log(det(cov[n]));
+  // weight[n] = log(w[n]) - `dimension`/2 * log(2*pi) - 1/2 * log(det(cov[n]));
   // where cov[n] is the covariance matrix of mixture n;
   const double* weight;
-  // pointer to the first element of a |num_mixtures|x|dimension| matrix
+  // pointer to the first element of a `num_mixtures`x`dimension` matrix
   // where kth row is the mean of the kth mixture.
   const double* mean;
-  // pointer to the first element of a |num_mixtures|x|dimension|x|dimension|
+  // pointer to the first element of a `num_mixtures`x`dimension`x`dimension`
   // 3D-matrix, where the kth 2D-matrix is the inverse of the covariance
   // matrix of the kth mixture.
   const double* covar_inverse;
@@ -36,8 +36,8 @@ struct GmmParameters {
   int num_mixtures;
 };
 
-// Evaluate the given GMM, according to |gmm_parameters|, at the given point
-// |x|. If the dimensionality of the given GMM is larger that the maximum
+// Evaluate the given GMM, according to `gmm_parameters`, at the given point
+// `x`. If the dimensionality of the given GMM is larger than the maximum
 // acceptable dimension by the following function -1 is returned.
 double EvaluateGmm(const double* x, const GmmParameters& gmm_parameters);
diff --git a/modules/audio_processing/vad/pitch_based_vad.h b/modules/audio_processing/vad/pitch_based_vad.h
index e005e23aa5..fa3abc2d28 100644
--- a/modules/audio_processing/vad/pitch_based_vad.h
+++ b/modules/audio_processing/vad/pitch_based_vad.h
@@ -34,7 +34,7 @@ class PitchBasedVad {
   //    p_combined: an array which contains the combined activity probabilities
   //                computed prior to the call of this function. The method,
   //                then, computes the voicing probabilities and combine them
-  //                with the given values. The result are returned in |p|.
+  //                with the given values. The results are returned in `p`.
   int VoicingProbability(const AudioFeatures& features, double* p_combined);
 
  private:
diff --git a/modules/audio_processing/vad/pitch_internal.h b/modules/audio_processing/vad/pitch_internal.h
index 938745d2a3..e382c1fbde 100644
--- a/modules/audio_processing/vad/pitch_internal.h
+++ b/modules/audio_processing/vad/pitch_internal.h
@@ -14,7 +14,7 @@ namespace webrtc {
 
 // TODO(turajs): Write a description of this function. Also be consistent with
-// usage of |sampling_rate_hz| vs |kSamplingFreqHz|.
+// usage of `sampling_rate_hz` vs `kSamplingFreqHz`.
 void GetSubframesPitchParameters(int sampling_rate_hz,
                                  double* gains,
                                  double* lags,
diff --git a/modules/audio_processing/vad/standalone_vad.h b/modules/audio_processing/vad/standalone_vad.h
index 3dff4163c2..b08463374e 100644
--- a/modules/audio_processing/vad/standalone_vad.h
+++ b/modules/audio_processing/vad/standalone_vad.h
@@ -26,12 +26,12 @@ class StandaloneVad {
   // Outputs
   //   p: a buffer where probabilities are written to.
-  //   length_p: number of elements of |p|.
+  //   length_p: number of elements of `p`.
   //
   // return value:
   //    -1: if no audio is stored or VAD returns error.
   //     0: in success.
-  // In case of error the content of |activity| is unchanged.
+  // In case of error the content of `activity` is unchanged.
   //
   // Note that due to a high false-positive (VAD decision is active while the
   // processed audio is just background noise) rate, stand-alone VAD is used as
diff --git a/modules/audio_processing/vad/standalone_vad_unittest.cc b/modules/audio_processing/vad/standalone_vad_unittest.cc
index 22b1f4961c..0fa2ed78b1 100644
--- a/modules/audio_processing/vad/standalone_vad_unittest.cc
+++ b/modules/audio_processing/vad/standalone_vad_unittest.cc
@@ -31,7 +31,7 @@ TEST(StandaloneVadTest, Api) {
   for (size_t n = 0; n < kMaxNumFrames; n++)
     EXPECT_EQ(0, vad->AddAudio(data, kLength10Ms));
 
-  // Pretend |p| is shorter that it should be.
+  // Pretend `p` is shorter than it should be.
   EXPECT_EQ(-1, vad->GetActivity(p, kMaxNumFrames - 1));
 
   EXPECT_EQ(0, vad->GetActivity(p, kMaxNumFrames));
diff --git a/modules/audio_processing/vad/vad_audio_proc.cc b/modules/audio_processing/vad/vad_audio_proc.cc
index 97cf65151c..aaf8214d7c 100644
--- a/modules/audio_processing/vad/vad_audio_proc.cc
+++ b/modules/audio_processing/vad/vad_audio_proc.cc
@@ -132,7 +132,7 @@ void VadAudioProc::SubframeCorrelation(double* corr,
                     kNumSubframeSamples + kNumPastSignalSamples, kLpcOrder);
 }
 
-// Compute |kNum10msSubframes| sets of LPC coefficients, one per 10 ms input.
+// Compute `kNum10msSubframes` sets of LPC coefficients, one per 10 ms input.
 // The analysis window is 15 ms long and it is centered on the first half of
 // each 10ms sub-frame. This is equivalent to computing LPC coefficients for the
 // first half of each 10 ms subframe.
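The weight/mean/covar_inverse layout documented in gmm.h above implies a straightforward evaluation loop. The following sketch shows what a function with EvaluateGmm's contract computes (assuming `dimension` and `num_mixtures` fields as documented; this is not the shipped implementation):

#include <cmath>

#include "modules/audio_processing/vad/gmm.h"

double EvaluateGmmSketch(const double* x, const webrtc::GmmParameters& gmm) {
  const int d = gmm.dimension;
  double f = 0.0;
  for (int k = 0; k < gmm.num_mixtures; ++k) {
    const double* mean = &gmm.mean[k * d];
    const double* cov_inv = &gmm.covar_inverse[k * d * d];
    // Quadratic form (x - mean)^T * covar_inverse * (x - mean).
    double q = 0.0;
    for (int i = 0; i < d; ++i) {
      for (int j = 0; j < d; ++j) {
        q += (x[i] - mean[i]) * cov_inv[i * d + j] * (x[j] - mean[j]);
      }
    }
    // `weight[k]` already folds in log(w[k]) and the Gaussian normalization
    // per the comment above, so each mixture contributes exp(weight[k] - q/2).
    f += std::exp(gmm.weight[k] - 0.5 * q);
  }
  return f;
}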
@@ -169,7 +169,7 @@ static float QuadraticInterpolation(float prev_val,
   return fractional_index;
 }
 
-// 1 / A(z), where A(z) is defined by |lpc| is a model of the spectral envelope
+// 1 / A(z), where A(z) is defined by `lpc`, is a model of the spectral envelope
 // of the input signal. The local maximum of the spectral envelope corresponds
 // with the local minimum of A(z). It saves complexity, as we save one
 // inversion. Furthermore, we find the first local maximum of magnitude squared,
diff --git a/modules/audio_processing/vad/vad_circular_buffer.h b/modules/audio_processing/vad/vad_circular_buffer.h
index 46b03d49bf..c1806f9e83 100644
--- a/modules/audio_processing/vad/vad_circular_buffer.h
+++ b/modules/audio_processing/vad/vad_circular_buffer.h
@@ -38,8 +38,8 @@ class VadCircularBuffer {
   // The mean value of the elements in the buffer. The return value is zero if
   // buffer is empty, i.e. no value is inserted.
   double Mean();
-  // Remove transients. If the values exceed |val_threshold| for a period
-  // shorter then or equal to |width_threshold|, then that period is considered
+  // Remove transients. If the values exceed `val_threshold` for a period
+  // shorter than or equal to `width_threshold`, then that period is considered
   // transient and set to zero.
   int RemoveTransient(int width_threshold, double val_threshold);
 
@@ -49,7 +49,7 @@ class VadCircularBuffer {
   // insertion. |index = 1| is the one before the most recent insertion, and
   // so on.
   int Get(int index, double* value) const;
-  // Set a given position to |value|. |index| is interpreted as above.
+  // Set a given position to `value`. `index` is interpreted as above.
   int Set(int index, double value);
   // Return the number of valid elements in the buffer.
   int BufferLevel();
diff --git a/modules/audio_processing/vad/voice_activity_detector.cc b/modules/audio_processing/vad/voice_activity_detector.cc
index f0d34c668e..02023d6a72 100644
--- a/modules/audio_processing/vad/voice_activity_detector.cc
+++ b/modules/audio_processing/vad/voice_activity_detector.cc
@@ -32,12 +32,13 @@ VoiceActivityDetector::VoiceActivityDetector()
 VoiceActivityDetector::~VoiceActivityDetector() = default;
 
 // Because ISAC has a different chunk length, it updates
-// |chunkwise_voice_probabilities_| and |chunkwise_rms_| when there is new data.
+// `chunkwise_voice_probabilities_` and `chunkwise_rms_` when there is new data.
 // Otherwise it clears them.
 void VoiceActivityDetector::ProcessChunk(const int16_t* audio,
                                          size_t length,
                                          int sample_rate_hz) {
   RTC_DCHECK_EQ(length, sample_rate_hz / 100);
 
+  // TODO(bugs.webrtc.org/7494): Remove resampling and force 16 kHz audio.
   // Resample to the required rate.
   const int16_t* resampled_ptr = audio;
   if (sample_rate_hz != kSampleRateHz) {
@@ -49,7 +50,7 @@ void VoiceActivityDetector::ProcessChunk(const int16_t* audio,
   }
   RTC_DCHECK_EQ(length, kLength10Ms);
 
-  // Each chunk needs to be passed into |standalone_vad_|, because internally it
+  // Each chunk needs to be passed into `standalone_vad_`, because internally it
   // buffers the audio and processes it all at once when GetActivity() is
   // called.
RTC_CHECK_EQ(standalone_vad_->AddAudio(resampled_ptr, length), 0); diff --git a/modules/audio_processing/vad/voice_activity_detector.h b/modules/audio_processing/vad/voice_activity_detector.h index a19883d51c..92b9a8c208 100644 --- a/modules/audio_processing/vad/voice_activity_detector.h +++ b/modules/audio_processing/vad/voice_activity_detector.h @@ -33,6 +33,8 @@ class VoiceActivityDetector { ~VoiceActivityDetector(); // Processes each audio chunk and estimates the voice probability. + // TODO(bugs.webrtc.org/7494): Switch to rtc::ArrayView and remove + // `sample_rate_hz`. void ProcessChunk(const int16_t* audio, size_t length, int sample_rate_hz); // Returns a vector of voice probabilities for each chunk. It can be empty for diff --git a/modules/audio_processing/vad/voice_activity_detector_unittest.cc b/modules/audio_processing/vad/voice_activity_detector_unittest.cc index 3214bd966d..80f21c8db0 100644 --- a/modules/audio_processing/vad/voice_activity_detector_unittest.cc +++ b/modules/audio_processing/vad/voice_activity_detector_unittest.cc @@ -133,7 +133,7 @@ TEST(VoiceActivityDetectorTest, Noise16kHzHasLowVoiceProbabilities) { vad.ProcessChunk(&data[0], data.size(), kSampleRateHz); // Before the |vad has enough data to process an ISAC block it will return - // the default value, 1.f, which would ruin the |max_probability| value. + // the default value, 1.f, which would ruin the `max_probability` value. if (i > kNumChunksPerIsacBlock) { max_probability = std::max(max_probability, vad.last_voice_probability()); } @@ -156,7 +156,7 @@ TEST(VoiceActivityDetectorTest, Noise32kHzHasLowVoiceProbabilities) { vad.ProcessChunk(&data[0], data.size(), 2 * kSampleRateHz); // Before the |vad has enough data to process an ISAC block it will return - // the default value, 1.f, which would ruin the |max_probability| value. + // the default value, 1.f, which would ruin the `max_probability` value. if (i > kNumChunksPerIsacBlock) { max_probability = std::max(max_probability, vad.last_voice_probability()); } diff --git a/modules/audio_processing/voice_detection.cc b/modules/audio_processing/voice_detection.cc deleted file mode 100644 index e6c92ae934..0000000000 --- a/modules/audio_processing/voice_detection.cc +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "modules/audio_processing/voice_detection.h" - -#include "common_audio/vad/include/webrtc_vad.h" -#include "modules/audio_processing/audio_buffer.h" -#include "rtc_base/checks.h" - -namespace webrtc { -class VoiceDetection::Vad { - public: - Vad() { - state_ = WebRtcVad_Create(); - RTC_CHECK(state_); - int error = WebRtcVad_Init(state_); - RTC_DCHECK_EQ(0, error); - } - ~Vad() { WebRtcVad_Free(state_); } - - Vad(Vad&) = delete; - Vad& operator=(Vad&) = delete; - - VadInst* state() { return state_; } - - private: - VadInst* state_ = nullptr; -}; - -VoiceDetection::VoiceDetection(int sample_rate_hz, Likelihood likelihood) - : sample_rate_hz_(sample_rate_hz), - frame_size_samples_(static_cast(sample_rate_hz_ / 100)), - likelihood_(likelihood), - vad_(new Vad()) { - int mode = 2; - switch (likelihood) { - case VoiceDetection::kVeryLowLikelihood: - mode = 3; - break; - case VoiceDetection::kLowLikelihood: - mode = 2; - break; - case VoiceDetection::kModerateLikelihood: - mode = 1; - break; - case VoiceDetection::kHighLikelihood: - mode = 0; - break; - default: - RTC_NOTREACHED(); - break; - } - int error = WebRtcVad_set_mode(vad_->state(), mode); - RTC_DCHECK_EQ(0, error); -} - -VoiceDetection::~VoiceDetection() {} - -bool VoiceDetection::ProcessCaptureAudio(AudioBuffer* audio) { - RTC_DCHECK_GE(AudioBuffer::kMaxSplitFrameLength, - audio->num_frames_per_band()); - std::array mixed_low_pass_data; - rtc::ArrayView mixed_low_pass(mixed_low_pass_data.data(), - audio->num_frames_per_band()); - if (audio->num_channels() == 1) { - FloatS16ToS16(audio->split_bands_const(0)[kBand0To8kHz], - audio->num_frames_per_band(), mixed_low_pass_data.data()); - } else { - const int num_channels = static_cast(audio->num_channels()); - for (size_t i = 0; i < audio->num_frames_per_band(); ++i) { - int32_t value = - FloatS16ToS16(audio->split_channels_const(kBand0To8kHz)[0][i]); - for (int j = 1; j < num_channels; ++j) { - value += FloatS16ToS16(audio->split_channels_const(kBand0To8kHz)[j][i]); - } - mixed_low_pass_data[i] = value / num_channels; - } - } - - int vad_ret = WebRtcVad_Process(vad_->state(), sample_rate_hz_, - mixed_low_pass.data(), frame_size_samples_); - RTC_DCHECK(vad_ret == 0 || vad_ret == 1); - return vad_ret == 0 ? false : true; -} -} // namespace webrtc diff --git a/modules/audio_processing/voice_detection.h b/modules/audio_processing/voice_detection.h deleted file mode 100644 index 79d44e647c..0000000000 --- a/modules/audio_processing/voice_detection.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef MODULES_AUDIO_PROCESSING_VOICE_DETECTION_H_ -#define MODULES_AUDIO_PROCESSING_VOICE_DETECTION_H_ - -#include - -#include - -#include "modules/audio_processing/include/audio_processing.h" - -namespace webrtc { - -class AudioBuffer; - -// The voice activity detection (VAD) component analyzes the stream to -// determine if voice is present. -class VoiceDetection { - public: - // Specifies the likelihood that a frame will be declared to contain voice. - // A higher value makes it more likely that speech will not be clipped, at - // the expense of more noise being detected as voice. 
- enum Likelihood { - kVeryLowLikelihood, - kLowLikelihood, - kModerateLikelihood, - kHighLikelihood - }; - - VoiceDetection(int sample_rate_hz, Likelihood likelihood); - ~VoiceDetection(); - - VoiceDetection(VoiceDetection&) = delete; - VoiceDetection& operator=(VoiceDetection&) = delete; - - // Returns true if voice is detected in the current frame. - bool ProcessCaptureAudio(AudioBuffer* audio); - - Likelihood likelihood() const { return likelihood_; } - - private: - class Vad; - - int sample_rate_hz_; - size_t frame_size_samples_; - Likelihood likelihood_; - std::unique_ptr vad_; -}; -} // namespace webrtc - -#endif // MODULES_AUDIO_PROCESSING_VOICE_DETECTION_H_ diff --git a/modules/audio_processing/voice_detection_unittest.cc b/modules/audio_processing/voice_detection_unittest.cc deleted file mode 100644 index 9a52fa60a0..0000000000 --- a/modules/audio_processing/voice_detection_unittest.cc +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#include - -#include "api/array_view.h" -#include "modules/audio_processing/audio_buffer.h" -#include "modules/audio_processing/test/audio_buffer_tools.h" -#include "modules/audio_processing/test/bitexactness_tools.h" -#include "modules/audio_processing/voice_detection.h" -#include "test/gtest.h" - -namespace webrtc { -namespace { - -const int kNumFramesToProcess = 1000; - -// Process one frame of data and produce the output. -bool ProcessOneFrame(int sample_rate_hz, - AudioBuffer* audio_buffer, - VoiceDetection* voice_detection) { - if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) { - audio_buffer->SplitIntoFrequencyBands(); - } - - return voice_detection->ProcessCaptureAudio(audio_buffer); -} - -// Processes a specified amount of frames, verifies the results and reports -// any errors. 
-void RunBitexactnessTest(int sample_rate_hz, - size_t num_channels, - bool stream_has_voice_reference) { - int sample_rate_to_use = std::min(sample_rate_hz, 16000); - VoiceDetection voice_detection(sample_rate_to_use, - VoiceDetection::kLowLikelihood); - - int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100); - const StreamConfig capture_config(sample_rate_hz, num_channels, false); - AudioBuffer capture_buffer( - capture_config.sample_rate_hz(), capture_config.num_channels(), - capture_config.sample_rate_hz(), capture_config.num_channels(), - capture_config.sample_rate_hz(), capture_config.num_channels()); - test::InputAudioFile capture_file( - test::GetApmCaptureTestVectorFileName(sample_rate_hz)); - std::vector capture_input(samples_per_channel * num_channels); - bool stream_has_voice = false; - for (int frame_no = 0; frame_no < kNumFramesToProcess; ++frame_no) { - ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels, - &capture_file, capture_input); - - test::CopyVectorToAudioBuffer(capture_config, capture_input, - &capture_buffer); - - stream_has_voice = - ProcessOneFrame(sample_rate_hz, &capture_buffer, &voice_detection); - } - - EXPECT_EQ(stream_has_voice_reference, stream_has_voice); -} - -const bool kStreamHasVoiceReference = true; - -} // namespace - -TEST(VoiceDetectionBitExactnessTest, Mono8kHz) { - RunBitexactnessTest(8000, 1, kStreamHasVoiceReference); -} - -TEST(VoiceDetectionBitExactnessTest, Mono16kHz) { - RunBitexactnessTest(16000, 1, kStreamHasVoiceReference); -} - -TEST(VoiceDetectionBitExactnessTest, Mono32kHz) { - RunBitexactnessTest(32000, 1, kStreamHasVoiceReference); -} - -TEST(VoiceDetectionBitExactnessTest, Mono48kHz) { - RunBitexactnessTest(48000, 1, kStreamHasVoiceReference); -} - -TEST(VoiceDetectionBitExactnessTest, Stereo8kHz) { - RunBitexactnessTest(8000, 2, kStreamHasVoiceReference); -} - -TEST(VoiceDetectionBitExactnessTest, Stereo16kHz) { - RunBitexactnessTest(16000, 2, kStreamHasVoiceReference); -} - -TEST(VoiceDetectionBitExactnessTest, Stereo32kHz) { - RunBitexactnessTest(32000, 2, kStreamHasVoiceReference); -} - -TEST(VoiceDetectionBitExactnessTest, Stereo48kHz) { - RunBitexactnessTest(48000, 2, kStreamHasVoiceReference); -} - -} // namespace webrtc diff --git a/modules/congestion_controller/OWNERS b/modules/congestion_controller/OWNERS index 3304c672cb..c74790f876 100644 --- a/modules/congestion_controller/OWNERS +++ b/modules/congestion_controller/OWNERS @@ -1,7 +1,6 @@ -srte@webrtc.org stefan@webrtc.org terelius@webrtc.org -crodbro@webrtc.org philipel@webrtc.org mflodman@webrtc.org yinwa@webrtc.org +perkj@webrtc.org diff --git a/modules/congestion_controller/goog_cc/BUILD.gn b/modules/congestion_controller/goog_cc/BUILD.gn index ea20da87a3..9aafedbcb4 100644 --- a/modules/congestion_controller/goog_cc/BUILD.gn +++ b/modules/congestion_controller/goog_cc/BUILD.gn @@ -27,9 +27,9 @@ rtc_library("goog_cc") { ":alr_detector", ":delay_based_bwe", ":estimators", - ":loss_based_controller", ":probe_controller", ":pushback_controller", + ":send_side_bwe", "../..:module_api", "../../../api:network_state_predictor_api", "../../../api/rtc_event_log", @@ -146,15 +146,55 @@ rtc_library("estimators") { ] } -rtc_library("loss_based_controller") { +rtc_library("loss_based_bwe_v2") { + sources = [ + "loss_based_bwe_v2.cc", + "loss_based_bwe_v2.h", + ] + deps = [ + "../../../api:array_view", + "../../../api/transport:network_control", + "../../../api/transport:webrtc_key_value_config", + "../../../api/units:data_rate", + 
"../../../api/units:data_size", + "../../../api/units:time_delta", + "../../../api/units:timestamp", + "../../../rtc_base:logging", + "../../../rtc_base/experiments:field_trial_parser", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("loss_based_bwe_v1") { configs += [ ":bwe_test_logging" ] sources = [ "loss_based_bandwidth_estimation.cc", "loss_based_bandwidth_estimation.h", + ] + deps = [ + "../../../api/transport:network_control", + "../../../api/transport:webrtc_key_value_config", + "../../../api/units:data_rate", + "../../../api/units:time_delta", + "../../../api/units:timestamp", + "../../../rtc_base:checks", + "../../../rtc_base/experiments:field_trial_parser", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] +} + +rtc_library("send_side_bwe") { + configs += [ ":bwe_test_logging" ] + sources = [ "send_side_bandwidth_estimation.cc", "send_side_bandwidth_estimation.h", ] deps = [ + ":loss_based_bwe_v1", + ":loss_based_bwe_v2", "../../../api/rtc_event_log", "../../../api/transport:network_control", "../../../api/transport:webrtc_key_value_config", @@ -269,6 +309,7 @@ if (rtc_include_tests) { "delay_based_bwe_unittest_helper.cc", "delay_based_bwe_unittest_helper.h", "goog_cc_network_control_unittest.cc", + "loss_based_bwe_v2_test.cc", "probe_bitrate_estimator_unittest.cc", "probe_controller_unittest.cc", "robust_throughput_estimator_unittest.cc", @@ -280,9 +321,10 @@ if (rtc_include_tests) { ":delay_based_bwe", ":estimators", ":goog_cc", - ":loss_based_controller", + ":loss_based_bwe_v2", ":probe_controller", ":pushback_controller", + ":send_side_bwe", "../../../api/rtc_event_log", "../../../api/test/network_emulation", "../../../api/test/network_emulation:create_cross_traffic", @@ -291,12 +333,15 @@ if (rtc_include_tests) { "../../../api/transport:network_control", "../../../api/transport:webrtc_key_value_config", "../../../api/units:data_rate", + "../../../api/units:data_size", + "../../../api/units:time_delta", "../../../api/units:timestamp", "../../../logging:mocks", "../../../logging:rtc_event_bwe", "../../../rtc_base:checks", "../../../rtc_base:rtc_base_approved", "../../../rtc_base:rtc_base_tests_utils", + "../../../rtc_base:stringutils", "../../../rtc_base/experiments:alr_experiment", "../../../system_wrappers", "../../../test:explicit_key_value_config", diff --git a/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h b/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h index f802191a2c..b6cee43125 100644 --- a/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h +++ b/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_interface.h @@ -34,23 +34,23 @@ struct RobustThroughputEstimatorSettings { // The estimator handles delay spikes by removing the largest receive time // gap, but this introduces some bias that may lead to overestimation when - // there isn't any delay spike. If |reduce_bias| is true, we instead replace + // there isn't any delay spike. If `reduce_bias` is true, we instead replace // the largest receive time gap by the second largest. This reduces the bias // at the cost of not completely removing the genuine delay spikes. bool reduce_bias = true; - // If |assume_shared_link| is false, we ignore the size of the first packet + // If `assume_shared_link` is false, we ignore the size of the first packet // when computing the receive rate. 
Otherwise, we remove half of the first // and last packet's sizes. bool assume_shared_link = false; - // The estimator window keeps at least |min_packets| packets and up to - // kMaxPackets received during the last |window_duration|. + // The estimator window keeps at least `min_packets` packets and up to + // kMaxPackets received during the last `window_duration`. unsigned min_packets = 20; TimeDelta window_duration = TimeDelta::Millis(500); - // The estimator window requires at least |initial_packets| packets received - // over at least |initial_duration|. + // The estimator window requires at least `initial_packets` packets received + // over at least `initial_duration`. unsigned initial_packets = 20; // If audio packets are included in allocation, but not in bandwidth diff --git a/modules/congestion_controller/goog_cc/delay_based_bwe.cc b/modules/congestion_controller/goog_cc/delay_based_bwe.cc index 185b09d8ab..2ae5441ef4 100644 --- a/modules/congestion_controller/goog_cc/delay_based_bwe.cc +++ b/modules/congestion_controller/goog_cc/delay_based_bwe.cc @@ -32,21 +32,8 @@ namespace webrtc { namespace { constexpr TimeDelta kStreamTimeOut = TimeDelta::Seconds(2); - -// Used with field trial "WebRTC-Bwe-NewInterArrivalDelta/Enabled/ constexpr TimeDelta kSendTimeGroupLength = TimeDelta::Millis(5); -// Used unless field trial "WebRTC-Bwe-NewInterArrivalDelta/Enabled/" -constexpr int kTimestampGroupLengthMs = 5; -constexpr int kAbsSendTimeFraction = 18; -constexpr int kAbsSendTimeInterArrivalUpshift = 8; -constexpr int kInterArrivalShift = - kAbsSendTimeFraction + kAbsSendTimeInterArrivalUpshift; -constexpr int kTimestampGroupTicks = - (kTimestampGroupLengthMs << kInterArrivalShift) / 1000; -constexpr double kTimestampToMs = - 1000.0 / static_cast(1 << kInterArrivalShift); - // This ssrc is used to fulfill the current API but will be removed // after the API has been changed. constexpr uint32_t kFixedSsrc = 0; @@ -95,9 +82,6 @@ DelayBasedBwe::DelayBasedBwe(const WebRtcKeyValueConfig* key_value_config, prev_bitrate_(DataRate::Zero()), has_once_detected_overuse_(false), prev_state_(BandwidthUsage::kBwNormal), - use_new_inter_arrival_delta_(absl::StartsWith( - key_value_config->Lookup("WebRTC-Bwe-NewInterArrivalDelta"), - "Enabled")), alr_limited_backoff_enabled_(absl::StartsWith( key_value_config->Lookup("WebRTC-Bwe-AlrLimitedBackoff"), "Enabled")) { @@ -162,17 +146,11 @@ void DelayBasedBwe::IncomingPacketFeedback(const PacketResult& packet_feedback, // Reset if the stream has timed out. 
if (last_seen_packet_.IsInfinite() || at_time - last_seen_packet_ > kStreamTimeOut) { - if (use_new_inter_arrival_delta_) { - video_inter_arrival_delta_ = - std::make_unique(kSendTimeGroupLength); - audio_inter_arrival_delta_ = - std::make_unique(kSendTimeGroupLength); - } else { - video_inter_arrival_ = std::make_unique( - kTimestampGroupTicks, kTimestampToMs, true); - audio_inter_arrival_ = std::make_unique( - kTimestampGroupTicks, kTimestampToMs, true); - } + video_inter_arrival_delta_ = + std::make_unique(kSendTimeGroupLength); + audio_inter_arrival_delta_ = + std::make_unique(kSendTimeGroupLength); + video_delay_detector_.reset( new TrendlineEstimator(key_value_config_, network_state_predictor_)); audio_delay_detector_.reset( @@ -203,7 +181,6 @@ void DelayBasedBwe::IncomingPacketFeedback(const PacketResult& packet_feedback, } DataSize packet_size = packet_feedback.sent_packet.size; - if (use_new_inter_arrival_delta_) { TimeDelta send_delta = TimeDelta::Zero(); TimeDelta recv_delta = TimeDelta::Zero(); int size_delta = 0; @@ -221,39 +198,6 @@ void DelayBasedBwe::IncomingPacketFeedback(const PacketResult& packet_feedback, packet_feedback.sent_packet.send_time.ms(), packet_feedback.receive_time.ms(), packet_size.bytes(), calculated_deltas); - } else { - InterArrival* inter_arrival_for_packet = - (separate_audio_.enabled && packet_feedback.sent_packet.audio) - ? video_inter_arrival_.get() - : audio_inter_arrival_.get(); - - uint32_t send_time_24bits = - static_cast( - ((static_cast(packet_feedback.sent_packet.send_time.ms()) - << kAbsSendTimeFraction) + - 500) / - 1000) & - 0x00FFFFFF; - // Shift up send time to use the full 32 bits that inter_arrival works with, - // so wrapping works properly. - uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift; - - uint32_t timestamp_delta = 0; - int64_t recv_delta_ms = 0; - int size_delta = 0; - - bool calculated_deltas = inter_arrival_for_packet->ComputeDeltas( - timestamp, packet_feedback.receive_time.ms(), at_time.ms(), - packet_size.bytes(), ×tamp_delta, &recv_delta_ms, &size_delta); - double send_delta_ms = - (1000.0 * timestamp_delta) / (1 << kInterArrivalShift); - - delay_detector_for_packet->Update( - recv_delta_ms, send_delta_ms, - packet_feedback.sent_packet.send_time.ms(), - packet_feedback.receive_time.ms(), packet_size.bytes(), - calculated_deltas); - } } DataRate DelayBasedBwe::TriggerOveruse(Timestamp at_time, diff --git a/modules/congestion_controller/goog_cc/delay_based_bwe.h b/modules/congestion_controller/goog_cc/delay_based_bwe.h index 85ce6eaa82..7823f77abe 100644 --- a/modules/congestion_controller/goog_cc/delay_based_bwe.h +++ b/modules/congestion_controller/goog_cc/delay_based_bwe.h @@ -127,7 +127,6 @@ class DelayBasedBwe { DataRate prev_bitrate_; bool has_once_detected_overuse_; BandwidthUsage prev_state_; - const bool use_new_inter_arrival_delta_; bool alr_limited_backoff_enabled_; }; diff --git a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc index 06345c4d9b..71b7ee7f90 100644 --- a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc +++ b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc @@ -28,15 +28,7 @@ const PacedPacketInfo kPacingInfo1(1, kNumProbesCluster1, 4000); constexpr float kTargetUtilizationFraction = 0.95f; } // namespace -INSTANTIATE_TEST_SUITE_P( - , - DelayBasedBweTest, - ::testing::Values("", "WebRTC-Bwe-NewInterArrivalDelta/Enabled/"), - [](::testing::TestParamInfo 
info) { - return info.param == "" ? "Default" : "NewInterArrival"; - }); - -TEST_P(DelayBasedBweTest, ProbeDetection) { +TEST_F(DelayBasedBweTest, ProbeDetection) { int64_t now_ms = clock_.TimeInMilliseconds(); // First burst sent at 8 * 1000 / 10 = 800 kbps. @@ -58,7 +50,7 @@ TEST_P(DelayBasedBweTest, ProbeDetection) { EXPECT_GT(bitrate_observer_.latest_bitrate(), 1500000u); } -TEST_P(DelayBasedBweTest, ProbeDetectionNonPacedPackets) { +TEST_F(DelayBasedBweTest, ProbeDetectionNonPacedPackets) { int64_t now_ms = clock_.TimeInMilliseconds(); // First burst sent at 8 * 1000 / 10 = 800 kbps, but with every other packet // not being paced which could mess things up. @@ -75,7 +67,7 @@ TEST_P(DelayBasedBweTest, ProbeDetectionNonPacedPackets) { EXPECT_GT(bitrate_observer_.latest_bitrate(), 800000u); } -TEST_P(DelayBasedBweTest, ProbeDetectionFasterArrival) { +TEST_F(DelayBasedBweTest, ProbeDetectionFasterArrival) { int64_t now_ms = clock_.TimeInMilliseconds(); // First burst sent at 8 * 1000 / 10 = 800 kbps. // Arriving at 8 * 1000 / 5 = 1600 kbps. @@ -90,7 +82,7 @@ TEST_P(DelayBasedBweTest, ProbeDetectionFasterArrival) { EXPECT_FALSE(bitrate_observer_.updated()); } -TEST_P(DelayBasedBweTest, ProbeDetectionSlowerArrival) { +TEST_F(DelayBasedBweTest, ProbeDetectionSlowerArrival) { int64_t now_ms = clock_.TimeInMilliseconds(); // First burst sent at 8 * 1000 / 5 = 1600 kbps. // Arriving at 8 * 1000 / 7 = 1142 kbps. @@ -109,7 +101,7 @@ TEST_P(DelayBasedBweTest, ProbeDetectionSlowerArrival) { kTargetUtilizationFraction * 1140000u, 10000u); } -TEST_P(DelayBasedBweTest, ProbeDetectionSlowerArrivalHighBitrate) { +TEST_F(DelayBasedBweTest, ProbeDetectionSlowerArrivalHighBitrate) { int64_t now_ms = clock_.TimeInMilliseconds(); // Burst sent at 8 * 1000 / 1 = 8000 kbps. // Arriving at 8 * 1000 / 2 = 4000 kbps. 
@@ -128,7 +120,7 @@ TEST_P(DelayBasedBweTest, ProbeDetectionSlowerArrivalHighBitrate) { kTargetUtilizationFraction * 4000000u, 10000u); } -TEST_P(DelayBasedBweTest, GetExpectedBwePeriodMs) { +TEST_F(DelayBasedBweTest, GetExpectedBwePeriodMs) { auto default_interval = bitrate_estimator_->GetExpectedBwePeriod(); EXPECT_GT(default_interval.ms(), 0); CapacityDropTestHelper(1, true, 333, 0); @@ -137,45 +129,45 @@ TEST_P(DelayBasedBweTest, GetExpectedBwePeriodMs) { EXPECT_NE(interval.ms(), default_interval.ms()); } -TEST_P(DelayBasedBweTest, InitialBehavior) { +TEST_F(DelayBasedBweTest, InitialBehavior) { InitialBehaviorTestHelper(730000); } -TEST_P(DelayBasedBweTest, RateIncreaseReordering) { +TEST_F(DelayBasedBweTest, RateIncreaseReordering) { RateIncreaseReorderingTestHelper(730000); } -TEST_P(DelayBasedBweTest, RateIncreaseRtpTimestamps) { +TEST_F(DelayBasedBweTest, RateIncreaseRtpTimestamps) { RateIncreaseRtpTimestampsTestHelper(622); } -TEST_P(DelayBasedBweTest, CapacityDropOneStream) { +TEST_F(DelayBasedBweTest, CapacityDropOneStream) { CapacityDropTestHelper(1, false, 300, 0); } -TEST_P(DelayBasedBweTest, CapacityDropPosOffsetChange) { +TEST_F(DelayBasedBweTest, CapacityDropPosOffsetChange) { CapacityDropTestHelper(1, false, 867, 30000); } -TEST_P(DelayBasedBweTest, CapacityDropNegOffsetChange) { +TEST_F(DelayBasedBweTest, CapacityDropNegOffsetChange) { CapacityDropTestHelper(1, false, 933, -30000); } -TEST_P(DelayBasedBweTest, CapacityDropOneStreamWrap) { +TEST_F(DelayBasedBweTest, CapacityDropOneStreamWrap) { CapacityDropTestHelper(1, true, 333, 0); } -TEST_P(DelayBasedBweTest, TestTimestampGrouping) { +TEST_F(DelayBasedBweTest, TestTimestampGrouping) { TestTimestampGroupingTestHelper(); } -TEST_P(DelayBasedBweTest, TestShortTimeoutAndWrap) { +TEST_F(DelayBasedBweTest, TestShortTimeoutAndWrap) { // Simulate a client leaving and rejoining the call after 35 seconds. This // will make abs send time wrap, so if streams aren't timed out properly // the next 30 seconds of packets will be out of order. TestWrappingHelper(35); } -TEST_P(DelayBasedBweTest, TestLongTimeoutAndWrap) { +TEST_F(DelayBasedBweTest, TestLongTimeoutAndWrap) { // Simulate a client leaving and rejoining the call after some multiple of // 64 seconds later. This will cause a zero difference in abs send times due // to the wrap, but a big difference in arrival time, if streams aren't @@ -183,7 +175,7 @@ TEST_P(DelayBasedBweTest, TestLongTimeoutAndWrap) { TestWrappingHelper(10 * 64); } -TEST_P(DelayBasedBweTest, TestInitialOveruse) { +TEST_F(DelayBasedBweTest, TestInitialOveruse) { const DataRate kStartBitrate = DataRate::KilobitsPerSec(300); const DataRate kInitialCapacity = DataRate::KilobitsPerSec(200); const uint32_t kDummySsrc = 0; @@ -223,16 +215,15 @@ TEST_P(DelayBasedBweTest, TestInitialOveruse) { } class DelayBasedBweTestWithBackoffTimeoutExperiment : public DelayBasedBweTest { + public: + DelayBasedBweTestWithBackoffTimeoutExperiment() + : DelayBasedBweTest( + "WebRTC-BweAimdRateControlConfig/initial_backoff_interval:200ms/") { + } }; -INSTANTIATE_TEST_SUITE_P( - , - DelayBasedBweTestWithBackoffTimeoutExperiment, - ::testing::Values( - "WebRTC-BweAimdRateControlConfig/initial_backoff_interval:200ms/")); - // This test subsumes and improves DelayBasedBweTest.TestInitialOveruse above. 
-TEST_P(DelayBasedBweTestWithBackoffTimeoutExperiment, TestInitialOveruse) {
+TEST_F(DelayBasedBweTestWithBackoffTimeoutExperiment, TestInitialOveruse) {
   const DataRate kStartBitrate = DataRate::KilobitsPerSec(300);
   const DataRate kInitialCapacity = DataRate::KilobitsPerSec(200);
   const uint32_t kDummySsrc = 0;
diff --git a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc
index 946805ab8a..3eb0ae38e5 100644
--- a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc
+++ b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc
@@ -96,7 +96,7 @@ void StreamGenerator::set_capacity_bps(int capacity_bps) {
   capacity_ = capacity_bps;
 }
 
-// Divides |bitrate_bps| among all streams. The allocated bitrate per stream
+// Divides `bitrate_bps` among all streams. The allocated bitrate per stream
 // is decided by the current allocation ratios.
 void StreamGenerator::SetBitrateBps(int bitrate_bps) {
   ASSERT_GE(streams_.size(), 0u);
@@ -145,8 +145,11 @@ int64_t StreamGenerator::GenerateFrame(std::vector<PacketResult>* packets,
   }
 }  // namespace test
 
-DelayBasedBweTest::DelayBasedBweTest()
-    : field_trial(std::make_unique<test::ScopedFieldTrials>(GetParam())),
+DelayBasedBweTest::DelayBasedBweTest() : DelayBasedBweTest("") {}
+
+DelayBasedBweTest::DelayBasedBweTest(const std::string& field_trial_string)
+    : field_trial(
+          std::make_unique<test::ScopedFieldTrials>(field_trial_string)),
       clock_(100000000),
       acknowledged_bitrate_estimator_(
           AcknowledgedBitrateEstimatorInterface::Create(&field_trial_config_)),
@@ -253,8 +256,8 @@ bool DelayBasedBweTest::GenerateAndProcessFrame(uint32_t ssrc,
   return overuse;
 }
 
-// Run the bandwidth estimator with a stream of |number_of_frames| frames, or
-// until it reaches |target_bitrate|.
+// Run the bandwidth estimator with a stream of `number_of_frames` frames, or
+// until it reaches `target_bitrate`.
 // Can for instance be used to run the estimator for some time to get it
 // into a steady state.
 uint32_t DelayBasedBweTest::SteadyStateRun(uint32_t ssrc,
@@ -265,7 +268,7 @@ uint32_t DelayBasedBweTest::SteadyStateRun(uint32_t ssrc,
                                            uint32_t target_bitrate) {
   uint32_t bitrate_bps = start_bitrate;
   bool bitrate_update_seen = false;
-  // Produce |number_of_frames| frames and give them to the estimator.
+  // Produce `number_of_frames` frames and give them to the estimator.
   for (int i = 0; i < max_number_of_frames; ++i) {
     bool overuse = GenerateAndProcessFrame(ssrc, bitrate_bps);
     if (overuse) {
@@ -476,7 +479,7 @@ void DelayBasedBweTest::TestTimestampGroupingTestHelper() {
   const int kTimestampGroupLength = 15;
   for (int i = 0; i < 100; ++i) {
     for (int j = 0; j < kTimestampGroupLength; ++j) {
-      // Insert |kTimestampGroupLength| frames with just 1 timestamp ticks in
+      // Insert `kTimestampGroupLength` frames with just one timestamp tick in
       // between. Should be treated as part of the same group by the estimator.
      IncomingFeedback(clock_.TimeInMilliseconds(), send_time_ms, 100);
      clock_.AdvanceTimeMilliseconds(kFrameIntervalMs / kTimestampGroupLength);
diff --git a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h
index 24e558c2d7..474d2970df 100644
--- a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h
+++ b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h
@@ -22,7 +22,6 @@
 #include "api/transport/network_types.h"
 #include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h"
 #include "modules/congestion_controller/goog_cc/delay_based_bwe.h"
-#include "rtc_base/constructor_magic.h"
 #include "system_wrappers/include/clock.h"
 #include "test/field_trial.h"
 #include "test/gtest.h"
@@ -54,6 +53,9 @@ class RtpStream {
 
   RtpStream(int fps, int bitrate_bps);
 
+  RtpStream(const RtpStream&) = delete;
+  RtpStream& operator=(const RtpStream&) = delete;
+
   // Generates a new frame for this stream. If called too soon after the
   // previous frame, no frame will be generated. The frame is split into
   // packets.
@@ -74,8 +76,6 @@ class RtpStream {
   int fps_;
   int bitrate_bps_;
   int64_t next_rtp_time_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(RtpStream);
 };
 
 class StreamGenerator {
@@ -83,17 +83,20 @@ class StreamGenerator {
   StreamGenerator(int capacity, int64_t time_now);
   ~StreamGenerator();
 
+  StreamGenerator(const StreamGenerator&) = delete;
+  StreamGenerator& operator=(const StreamGenerator&) = delete;
+
   // Add a new stream.
   void AddStream(RtpStream* stream);
 
   // Set the link capacity.
   void set_capacity_bps(int capacity_bps);
 
-  // Divides |bitrate_bps| among all streams. The allocated bitrate per stream
+  // Divides `bitrate_bps` among all streams. The allocated bitrate per stream
   // is decided by the initial allocation ratios.
   void SetBitrateBps(int bitrate_bps);
 
-  // Set the RTP timestamp offset for the stream identified by |ssrc|.
+  // Set the RTP timestamp offset for the stream identified by `ssrc`.
   void set_rtp_timestamp_offset(uint32_t ssrc, uint32_t offset);
 
   // TODO(holmer): Break out the channel simulation part from this class to make
@@ -108,14 +111,13 @@ class StreamGenerator {
   int64_t prev_arrival_time_us_;
   // All streams being transmitted on this simulated channel.
   std::vector<std::unique_ptr<RtpStream>> streams_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(StreamGenerator);
 };
 }  // namespace test
 
-class DelayBasedBweTest : public ::testing::TestWithParam<std::string> {
+class DelayBasedBweTest : public ::testing::Test {
  public:
   DelayBasedBweTest();
+  explicit DelayBasedBweTest(const std::string& field_trial_string);
   ~DelayBasedBweTest() override;
 
  protected:
@@ -138,8 +140,8 @@ class DelayBasedBweTest : public ::testing::TestWithParam<std::string> {
   // target bitrate after the call to this function.
   bool GenerateAndProcessFrame(uint32_t ssrc, uint32_t bitrate_bps);
 
-  // Run the bandwidth estimator with a stream of |number_of_frames| frames, or
-  // until it reaches |target_bitrate|.
+  // Run the bandwidth estimator with a stream of `number_of_frames` frames, or
+  // until it reaches `target_bitrate`.
   // Can for instance be used to run the estimator for some time to get it
   // into a steady state.
  uint32_t SteadyStateRun(uint32_t ssrc,
diff --git a/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h b/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h
index eaadb0d124..fc12cff7d5 100644
--- a/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h
+++ b/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h
@@ -13,7 +13,6 @@
 #include <stdint.h>
 
 #include "api/network_state_predictor.h"
-#include "rtc_base/constructor_magic.h"
 
 namespace webrtc {
 
@@ -22,6 +21,11 @@ class DelayIncreaseDetectorInterface {
   DelayIncreaseDetectorInterface() {}
   virtual ~DelayIncreaseDetectorInterface() {}
 
+  DelayIncreaseDetectorInterface(const DelayIncreaseDetectorInterface&) =
+      delete;
+  DelayIncreaseDetectorInterface& operator=(
+      const DelayIncreaseDetectorInterface&) = delete;
+
   // Update the detector with a new sample. The deltas should represent deltas
   // between timestamp groups as defined by the InterArrival class.
   virtual void Update(double recv_delta_ms,
@@ -32,8 +36,6 @@ class DelayIncreaseDetectorInterface {
                       bool calculated_deltas) = 0;
 
   virtual BandwidthUsage State() const = 0;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(DelayIncreaseDetectorInterface);
 };
 
 }  // namespace webrtc
diff --git a/modules/congestion_controller/goog_cc/goog_cc_network_control.cc b/modules/congestion_controller/goog_cc/goog_cc_network_control.cc
index 2344f45a65..ba656be234 100644
--- a/modules/congestion_controller/goog_cc/goog_cc_network_control.cc
+++ b/modules/congestion_controller/goog_cc/goog_cc_network_control.cc
@@ -22,6 +22,7 @@
 #include <vector>
 
 #include "absl/strings/match.h"
+#include "api/units/data_rate.h"
 #include "api/units/time_delta.h"
 #include "logging/rtc_event_log/events/rtc_event_remote_estimate.h"
 #include "modules/congestion_controller/goog_cc/alr_detector.h"
@@ -88,6 +89,9 @@ GoogCcNetworkController::GoogCcNetworkController(NetworkControllerConfig config,
           RateControlSettings::ParseFromKeyValueConfig(key_value_config_)),
       loss_based_stable_rate_(
           IsEnabled(key_value_config_, "WebRTC-Bwe-LossBasedStableRate")),
+      pace_at_max_of_bwe_and_lower_link_capacity_(
+          IsEnabled(key_value_config_,
+                    "WebRTC-Bwe-PaceAtMaxOfBweAndLowerLinkCapacity")),
       probe_controller_(
           new ProbeController(key_value_config_, config.event_log)),
       congestion_window_pushback_controller_(
@@ -694,9 +698,17 @@ void GoogCcNetworkController::MaybeTriggerOnNetworkChanged(
 
 PacerConfig GoogCcNetworkController::GetPacingRates(Timestamp at_time) const {
   // Pacing rate is based on target rate before congestion window pushback,
   // because we don't want to build queues in the pacer when pushback occurs.
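// A worked example of the change below, assuming the default pacing factor
// of 2.5: with min_total_allocated_bitrate_ = 0, a loss based target of
// 600 kbps, and estimate_->link_capacity_lower = 1000 kbps, the trial-enabled
// branch paces at max(0, 1000, 600) * 2.5 = 2500 kbps, whereas the default
// branch would pace at max(0, 600) * 2.5 = 1500 kbps.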
-  DataRate pacing_rate =
-      std::max(min_total_allocated_bitrate_, last_loss_based_target_rate_) *
-      pacing_factor_;
+  DataRate pacing_rate = DataRate::Zero();
+  if (pace_at_max_of_bwe_and_lower_link_capacity_ && estimate_) {
+    pacing_rate =
+        std::max({min_total_allocated_bitrate_, estimate_->link_capacity_lower,
+                  last_loss_based_target_rate_}) *
+        pacing_factor_;
+  } else {
+    pacing_rate =
+        std::max(min_total_allocated_bitrate_, last_loss_based_target_rate_) *
+        pacing_factor_;
+  }
   DataRate padding_rate =
       std::min(max_padding_rate_, last_pushback_target_rate_);
   PacerConfig msg;
diff --git a/modules/congestion_controller/goog_cc/goog_cc_network_control.h b/modules/congestion_controller/goog_cc/goog_cc_network_control.h
index 6dd70c8969..946c076939 100644
--- a/modules/congestion_controller/goog_cc/goog_cc_network_control.h
+++ b/modules/congestion_controller/goog_cc/goog_cc_network_control.h
@@ -94,6 +94,7 @@ class GoogCcNetworkController : public NetworkControllerInterface {
   const bool limit_probes_lower_than_throughput_estimate_;
   const RateControlSettings rate_control_settings_;
   const bool loss_based_stable_rate_;
+  const bool pace_at_max_of_bwe_and_lower_link_capacity_;
 
   const std::unique_ptr<ProbeController> probe_controller_;
   const std::unique_ptr<CongestionWindowPushbackController>
diff --git a/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc b/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc
index 7e8d7b9ac6..0552109a0d 100644
--- a/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc
+++ b/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc
@@ -13,17 +13,14 @@
 #include "api/test/network_emulation/create_cross_traffic.h"
 #include "api/test/network_emulation/cross_traffic.h"
 #include "api/transport/goog_cc_factory.h"
+#include "api/transport/network_types.h"
 #include "api/units/data_rate.h"
 #include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
 #include "test/field_trial.h"
 #include "test/gtest.h"
 #include "test/scenario/scenario.h"
 
-using ::testing::_;
-using ::testing::Field;
-using ::testing::Matcher;
 using ::testing::NiceMock;
-using ::testing::Property;
 
 namespace webrtc {
 namespace test {
@@ -75,6 +72,72 @@ CallClient* CreateVideoSendingClient(
   return client;
 }
 
+NetworkRouteChange CreateRouteChange(
+    Timestamp time,
+    absl::optional<DataRate> start_rate = absl::nullopt,
+    absl::optional<DataRate> min_rate = absl::nullopt,
+    absl::optional<DataRate> max_rate = absl::nullopt) {
+  NetworkRouteChange route_change;
+  route_change.at_time = time;
+  route_change.constraints.at_time = time;
+  route_change.constraints.min_data_rate = min_rate;
+  route_change.constraints.max_data_rate = max_rate;
+  route_change.constraints.starting_rate = start_rate;
+  return route_change;
+}
+
+PacketResult CreatePacketResult(Timestamp arrival_time,
+                                Timestamp send_time,
+                                size_t payload_size,
+                                PacedPacketInfo pacing_info) {
+  PacketResult packet_result;
+  packet_result.sent_packet = SentPacket();
+  packet_result.sent_packet.send_time = send_time;
+  packet_result.sent_packet.size = DataSize::Bytes(payload_size);
+  packet_result.sent_packet.pacing_info = pacing_info;
+  packet_result.receive_time = arrival_time;
+  return packet_result;
+}
+
+// Simulate sending packets and receiving transport feedback during
+// `runtime_ms`.
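// Each iteration of the helper defined next sends one 1000-byte packet whose
// feedback reports an artificial one-way delay growing by `delay` ms per
// packet, then advances time by 50 ms and runs a process interval; the most
// recently reported target rate, if any, is returned. `delay == 0` thus
// models an unconstrained link, while a positive value models a steadily
// building queue.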
+absl::optional<DataRate> PacketTransmissionAndFeedbackBlock(
+    NetworkControllerInterface* controller,
+    int64_t runtime_ms,
+    int64_t delay,
+    Timestamp& current_time) {
+  NetworkControlUpdate update;
+  absl::optional<DataRate> target_bitrate;
+  int64_t delay_buildup = 0;
+  int64_t start_time_ms = current_time.ms();
+  while (current_time.ms() - start_time_ms < runtime_ms) {
+    constexpr size_t kPayloadSize = 1000;
+    PacketResult packet =
+        CreatePacketResult(current_time + TimeDelta::Millis(delay_buildup),
+                           current_time, kPayloadSize, PacedPacketInfo());
+    delay_buildup += delay;
+    update = controller->OnSentPacket(packet.sent_packet);
+    if (update.target_rate) {
+      target_bitrate = update.target_rate->target_rate;
+    }
+    TransportPacketsFeedback feedback;
+    feedback.feedback_time = packet.receive_time;
+    feedback.packet_feedbacks.push_back(packet);
+    update = controller->OnTransportPacketsFeedback(feedback);
+    if (update.target_rate) {
+      target_bitrate = update.target_rate->target_rate;
+    }
+    current_time += TimeDelta::Millis(50);
+    update = controller->OnProcessInterval({.at_time = current_time});
+    if (update.target_rate) {
+      target_bitrate = update.target_rate->target_rate;
+    }
+  }
+  return target_bitrate;
+}
+
+// Scenarios:
+
 void UpdatesTargetRateBasedOnLinkCapacity(std::string test_name = "") {
   ScopedFieldTrials trial("WebRTC-SendSideBwe-WithOverhead/Enabled/");
   auto factory = CreateFeedbackOnlyFactory();
@@ -153,40 +216,27 @@ DataRate RunRembDipScenario(std::string test_name) {
   return client->send_bandwidth();
 }
+
 }  // namespace
 
-class GoogCcNetworkControllerTest : public ::testing::Test {
- protected:
-  GoogCcNetworkControllerTest()
-      : current_time_(Timestamp::Millis(123456)), factory_() {}
-  ~GoogCcNetworkControllerTest() override {}
-
-  void SetUp() override {
-    controller_ = factory_.Create(InitialConfig());
-    NetworkControlUpdate update =
-        controller_->OnProcessInterval(DefaultInterval());
-    EXPECT_EQ(update.target_rate->target_rate, kInitialBitrate);
-    EXPECT_EQ(update.pacer_config->data_rate(),
-              kInitialBitrate * kDefaultPacingRate);
-
-    EXPECT_EQ(update.probe_cluster_configs[0].target_data_rate,
-              kInitialBitrate * 3);
-    EXPECT_EQ(update.probe_cluster_configs[1].target_data_rate,
-              kInitialBitrate * 5);
-  }
-  // Custom setup - use an observer that tracks the target bitrate, without
-  // prescribing on which iterations it must change (like a mock would).
-  void TargetBitrateTrackingSetup() {
-    controller_ = factory_.Create(InitialConfig());
-    OnUpdate(controller_->OnProcessInterval(DefaultInterval()));
+class NetworkControllerTestFixture {
+ public:
+  NetworkControllerTestFixture() : factory_() {}
+
+  std::unique_ptr<NetworkControllerInterface> CreateController() {
+    NetworkControllerConfig config = InitialConfig();
+    std::unique_ptr<NetworkControllerInterface> controller =
+        factory_.Create(config);
+    return controller;
   }
 
+ private:
   NetworkControllerConfig InitialConfig(
       int starting_bandwidth_kbps = kInitialBitrateKbps,
       int min_data_rate_kbps = 0,
       int max_data_rate_kbps = 5 * kInitialBitrateKbps) {
     NetworkControllerConfig config;
-    config.constraints.at_time = current_time_;
+    config.constraints.at_time = Timestamp::Millis(0);
     config.constraints.min_data_rate =
         DataRate::KilobitsPerSec(min_data_rate_kbps);
     config.constraints.max_data_rate =
@@ -196,102 +246,173 @@ class GoogCcNetworkControllerTest : public ::testing::Test {
     config.event_log = &event_log_;
     return config;
   }
-  ProcessInterval DefaultInterval() {
-    ProcessInterval interval;
-    interval.at_time = current_time_;
-    return interval;
-  }
-  RemoteBitrateReport CreateBitrateReport(DataRate rate) {
-    RemoteBitrateReport report;
-    report.receive_time = current_time_;
-    report.bandwidth = rate;
-    return report;
-  }
-  PacketResult CreateResult(int64_t arrival_time_ms,
-                            int64_t send_time_ms,
-                            size_t payload_size,
-                            PacedPacketInfo pacing_info) {
-    PacketResult packet_result;
-    packet_result.sent_packet = SentPacket();
-    packet_result.sent_packet.send_time = Timestamp::Millis(send_time_ms);
-    packet_result.sent_packet.size = DataSize::Bytes(payload_size);
-    packet_result.sent_packet.pacing_info = pacing_info;
-    packet_result.receive_time = Timestamp::Millis(arrival_time_ms);
-    return packet_result;
-  }
-
-  NetworkRouteChange CreateRouteChange(
-      absl::optional<DataRate> start_rate = absl::nullopt,
-      absl::optional<DataRate> min_rate = absl::nullopt,
-      absl::optional<DataRate> max_rate = absl::nullopt) {
-    NetworkRouteChange route_change;
-    route_change.at_time = current_time_;
-    route_change.constraints.at_time = current_time_;
-    route_change.constraints.min_data_rate = min_rate;
-    route_change.constraints.max_data_rate = max_rate;
-    route_change.constraints.starting_rate = start_rate;
-    return route_change;
-  }
-
-  void AdvanceTimeMilliseconds(int timedelta_ms) {
-    current_time_ += TimeDelta::Millis(timedelta_ms);
-  }
-  void OnUpdate(NetworkControlUpdate update) {
-    if (update.target_rate)
-      target_bitrate_ = update.target_rate->target_rate;
-  }
-
-  void PacketTransmissionAndFeedbackBlock(int64_t runtime_ms, int64_t delay) {
-    int64_t delay_buildup = 0;
-    int64_t start_time_ms = current_time_.ms();
-    while (current_time_.ms() - start_time_ms < runtime_ms) {
-      constexpr size_t kPayloadSize = 1000;
-      PacketResult packet =
-          CreateResult(current_time_.ms() + delay_buildup, current_time_.ms(),
-                       kPayloadSize, PacedPacketInfo());
-      delay_buildup += delay;
-      OnUpdate(controller_->OnSentPacket(packet.sent_packet));
-      TransportPacketsFeedback feedback;
-      feedback.feedback_time = packet.receive_time;
-      feedback.packet_feedbacks.push_back(packet);
-      OnUpdate(controller_->OnTransportPacketsFeedback(feedback));
-      AdvanceTimeMilliseconds(50);
-      OnUpdate(controller_->OnProcessInterval(DefaultInterval()));
-    }
-  }
-  Timestamp current_time_;
-  absl::optional<DataRate> target_bitrate_;
   NiceMock<MockRtcEventLog> event_log_;
   GoogCcNetworkControllerFactory factory_;
-  std::unique_ptr<NetworkControllerInterface> controller_;
 };
 
-TEST_F(GoogCcNetworkControllerTest, ReactsToChangedNetworkConditions) {
-  // Test no change.
-  AdvanceTimeMilliseconds(25);
-  OnUpdate(controller_->OnProcessInterval(DefaultInterval()));
+TEST(GoogCcNetworkControllerTest, InitializeTargetRateOnFirstProcessInterval) {
+  NetworkControllerTestFixture fixture;
+  std::unique_ptr<NetworkControllerInterface> controller =
+      fixture.CreateController();
 
-  NetworkControlUpdate update;
-  OnUpdate(controller_->OnRemoteBitrateReport(
-      CreateBitrateReport(kInitialBitrate * 2)));
-  AdvanceTimeMilliseconds(25);
-  update = controller_->OnProcessInterval(DefaultInterval());
+  NetworkControlUpdate update =
+      controller->OnProcessInterval({.at_time = Timestamp::Millis(123456)});
+
+  EXPECT_EQ(update.target_rate->target_rate, kInitialBitrate);
+  EXPECT_EQ(update.pacer_config->data_rate(),
+            kInitialBitrate * kDefaultPacingRate);
+  EXPECT_EQ(update.probe_cluster_configs[0].target_data_rate,
+            kInitialBitrate * 3);
+  EXPECT_EQ(update.probe_cluster_configs[1].target_data_rate,
+            kInitialBitrate * 5);
+}
+
+TEST(GoogCcNetworkControllerTest, ReactsToChangedNetworkConditions) {
+  NetworkControllerTestFixture fixture;
+  std::unique_ptr<NetworkControllerInterface> controller =
+      fixture.CreateController();
+  Timestamp current_time = Timestamp::Millis(123);
+  NetworkControlUpdate update =
+      controller->OnProcessInterval({.at_time = current_time});
+  update = controller->OnRemoteBitrateReport(
+      {.receive_time = current_time, .bandwidth = kInitialBitrate * 2});
+
+  current_time += TimeDelta::Millis(25);
+  update = controller->OnProcessInterval({.at_time = current_time});
   EXPECT_EQ(update.target_rate->target_rate, kInitialBitrate * 2);
   EXPECT_EQ(update.pacer_config->data_rate(),
             kInitialBitrate * 2 * kDefaultPacingRate);
 
-  OnUpdate(
-      controller_->OnRemoteBitrateReport(CreateBitrateReport(kInitialBitrate)));
-  AdvanceTimeMilliseconds(25);
-  update = controller_->OnProcessInterval(DefaultInterval());
+  update = controller->OnRemoteBitrateReport(
+      {.receive_time = current_time, .bandwidth = kInitialBitrate});
+  current_time += TimeDelta::Millis(25);
+  update = controller->OnProcessInterval({.at_time = current_time});
   EXPECT_EQ(update.target_rate->target_rate, kInitialBitrate);
   EXPECT_EQ(update.pacer_config->data_rate(),
             kInitialBitrate * kDefaultPacingRate);
 }
 
+TEST(GoogCcNetworkControllerTest, OnNetworkRouteChanged) {
+  NetworkControllerTestFixture fixture;
+  std::unique_ptr<NetworkControllerInterface> controller =
+      fixture.CreateController();
+  Timestamp current_time = Timestamp::Millis(123);
+  DataRate new_bitrate = DataRate::BitsPerSec(200000);
+  NetworkControlUpdate update = controller->OnNetworkRouteChange(
+      CreateRouteChange(current_time, new_bitrate));
+  EXPECT_EQ(update.target_rate->target_rate, new_bitrate);
+  EXPECT_EQ(update.pacer_config->data_rate(), new_bitrate * kDefaultPacingRate);
+  EXPECT_EQ(update.probe_cluster_configs.size(), 2u);
+
+  // If the bitrate is reset to -1, the new starting bitrate will be
+  // the minimum default bitrate.
+  const DataRate kDefaultMinBitrate = DataRate::KilobitsPerSec(5);
+  update = controller->OnNetworkRouteChange(CreateRouteChange(current_time));
+  EXPECT_EQ(update.target_rate->target_rate, kDefaultMinBitrate);
+  EXPECT_NEAR(update.pacer_config->data_rate().bps(),
+              kDefaultMinBitrate.bps() * kDefaultPacingRate, 10);
+  EXPECT_EQ(update.probe_cluster_configs.size(), 2u);
+}
+
+TEST(GoogCcNetworkControllerTest, ProbeOnRouteChange) {
+  NetworkControllerTestFixture fixture;
+  std::unique_ptr<NetworkControllerInterface> controller =
+      fixture.CreateController();
+  Timestamp current_time = Timestamp::Millis(123);
+  NetworkControlUpdate update = controller->OnNetworkRouteChange(
+      CreateRouteChange(current_time, 2 * kInitialBitrate, DataRate::Zero(),
+                        20 * kInitialBitrate));
+
+  EXPECT_TRUE(update.pacer_config.has_value());
+  EXPECT_EQ(update.target_rate->target_rate, kInitialBitrate * 2);
+  EXPECT_EQ(update.probe_cluster_configs.size(), 2u);
+  EXPECT_EQ(update.probe_cluster_configs[0].target_data_rate,
+            kInitialBitrate * 6);
+  EXPECT_EQ(update.probe_cluster_configs[1].target_data_rate,
+            kInitialBitrate * 12);
+
+  update = controller->OnProcessInterval({.at_time = current_time});
+}
+
+// Bandwidth estimation is updated when feedbacks are received.
+// Feedbacks which show an increasing delay cause the estimation to be reduced.
+TEST(GoogCcNetworkControllerTest, UpdatesDelayBasedEstimate) {
+  NetworkControllerTestFixture fixture;
+  std::unique_ptr<NetworkControllerInterface> controller =
+      fixture.CreateController();
+  const int64_t kRunTimeMs = 6000;
+  Timestamp current_time = Timestamp::Millis(123);
+
+  // The test must run and insert packets/feedback long enough that the
+  // BWE computes a valid estimate. This is first done in an environment which
+  // simulates no bandwidth limitation, and therefore no built-up delay.
+  absl::optional<DataRate> target_bitrate_before_delay =
+      PacketTransmissionAndFeedbackBlock(controller.get(), kRunTimeMs, 0,
+                                         current_time);
+  ASSERT_TRUE(target_bitrate_before_delay.has_value());
+
+  // Repeat, but this time with a building delay, and make sure that the
+  // estimation is adjusted downwards.
+  absl::optional<DataRate> target_bitrate_after_delay =
+      PacketTransmissionAndFeedbackBlock(controller.get(), kRunTimeMs, 50,
+                                         current_time);
+  EXPECT_LT(*target_bitrate_after_delay, *target_bitrate_before_delay);
+}
+
+TEST(GoogCcNetworkControllerTest, PaceAtMaxOfLowerLinkCapacityAndBwe) {
+  ScopedFieldTrials trial(
+      "WebRTC-Bwe-PaceAtMaxOfBweAndLowerLinkCapacity/Enabled/");
+  NetworkControllerTestFixture fixture;
+  std::unique_ptr<NetworkControllerInterface> controller =
+      fixture.CreateController();
+  Timestamp current_time = Timestamp::Millis(123);
+  NetworkControlUpdate update =
+      controller->OnProcessInterval({.at_time = current_time});
+  current_time += TimeDelta::Millis(100);
+  NetworkStateEstimate network_estimate = {.link_capacity_lower =
+                                               10 * kInitialBitrate};
+  update = controller->OnNetworkStateEstimate(network_estimate);
+  // OnNetworkStateEstimate does not trigger processing a new estimate. So add a
+  // dummy loss report to trigger a BWE update in the next process interval.
+ TransportLossReport loss_report; + loss_report.start_time = current_time; + loss_report.end_time = current_time; + loss_report.receive_time = current_time; + loss_report.packets_received_delta = 50; + loss_report.packets_lost_delta = 1; + update = controller->OnTransportLossReport(loss_report); + update = controller->OnProcessInterval({.at_time = current_time}); + ASSERT_TRUE(update.pacer_config); + ASSERT_TRUE(update.target_rate); + ASSERT_LT(update.target_rate->target_rate, + network_estimate.link_capacity_lower); + EXPECT_EQ(update.pacer_config->data_rate().kbps(), + network_estimate.link_capacity_lower.kbps() * kDefaultPacingRate); + + current_time += TimeDelta::Millis(100); + // Set a low link capacity estimate and verify that pacing rate is set + // relative to loss based/delay based estimate. + network_estimate = {.link_capacity_lower = 0.5 * kInitialBitrate}; + update = controller->OnNetworkStateEstimate(network_estimate); + // Again, we need to inject a dummy loss report to trigger an update of the + // BWE in the next process interval. + loss_report.start_time = current_time; + loss_report.end_time = current_time; + loss_report.receive_time = current_time; + loss_report.packets_received_delta = 50; + loss_report.packets_lost_delta = 0; + update = controller->OnTransportLossReport(loss_report); + update = controller->OnProcessInterval({.at_time = current_time}); + ASSERT_TRUE(update.target_rate); + ASSERT_GT(update.target_rate->target_rate, + network_estimate.link_capacity_lower); + EXPECT_EQ(update.pacer_config->data_rate().kbps(), + update.target_rate->target_rate.kbps() * kDefaultPacingRate); +} + // Test congestion window pushback on network delay happens. -TEST_F(GoogCcNetworkControllerTest, CongestionWindowPushbackOnNetworkDelay) { +TEST(GoogCcScenario, CongestionWindowPushbackOnNetworkDelay) { auto factory = CreateFeedbackOnlyFactory(); ScopedFieldTrials trial( "WebRTC-CongestionWindow/QueueSize:800,MinBitrate:30000/"); @@ -325,8 +446,7 @@ TEST_F(GoogCcNetworkControllerTest, CongestionWindowPushbackOnNetworkDelay) { } // Test congestion window pushback on network delay happens. -TEST_F(GoogCcNetworkControllerTest, - CongestionWindowPushbackDropFrameOnNetworkDelay) { +TEST(GoogCcScenario, CongestionWindowPushbackDropFrameOnNetworkDelay) { auto factory = CreateFeedbackOnlyFactory(); ScopedFieldTrials trial( "WebRTC-CongestionWindow/QueueSize:800,MinBitrate:30000,DropFrame:true/"); @@ -358,61 +478,7 @@ TEST_F(GoogCcNetworkControllerTest, EXPECT_GT(client->target_rate().kbps(), 300); } -TEST_F(GoogCcNetworkControllerTest, OnNetworkRouteChanged) { - NetworkControlUpdate update; - DataRate new_bitrate = DataRate::BitsPerSec(200000); - update = controller_->OnNetworkRouteChange(CreateRouteChange(new_bitrate)); - EXPECT_EQ(update.target_rate->target_rate, new_bitrate); - EXPECT_EQ(update.pacer_config->data_rate(), new_bitrate * kDefaultPacingRate); - EXPECT_EQ(update.probe_cluster_configs.size(), 2u); - - // If the bitrate is reset to -1, the new starting bitrate will be - // the minimum default bitrate. 
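// The fixture-based TEST_F cases below are either rewritten against the
// NetworkControllerTestFixture above or renamed into the GoogCcScenario
// suite; scenario-driven cases keep no fixture state, so plain TESTs
// suffice. Schematically (names here are hypothetical):
//
//   TEST(GoogCcScenario, SomeBehavior) {
//     ScopedFieldTrials trial("WebRTC-SomeTrial/Enabled/");
//     Scenario s("googcc_unit/some_behavior");
//     // ... build senders/routes, s.RunFor(...), EXPECT_* on client stats.
//   }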
- const DataRate kDefaultMinBitrate = DataRate::KilobitsPerSec(5); - update = controller_->OnNetworkRouteChange(CreateRouteChange()); - EXPECT_EQ(update.target_rate->target_rate, kDefaultMinBitrate); - EXPECT_NEAR(update.pacer_config->data_rate().bps(), - kDefaultMinBitrate.bps() * kDefaultPacingRate, 10); - EXPECT_EQ(update.probe_cluster_configs.size(), 2u); -} - -TEST_F(GoogCcNetworkControllerTest, ProbeOnRouteChange) { - NetworkControlUpdate update; - update = controller_->OnNetworkRouteChange(CreateRouteChange( - 2 * kInitialBitrate, DataRate::Zero(), 20 * kInitialBitrate)); - - EXPECT_TRUE(update.pacer_config.has_value()); - EXPECT_EQ(update.target_rate->target_rate, kInitialBitrate * 2); - EXPECT_EQ(update.probe_cluster_configs.size(), 2u); - EXPECT_EQ(update.probe_cluster_configs[0].target_data_rate, - kInitialBitrate * 6); - EXPECT_EQ(update.probe_cluster_configs[1].target_data_rate, - kInitialBitrate * 12); - - update = controller_->OnProcessInterval(DefaultInterval()); -} - -// Bandwidth estimation is updated when feedbacks are received. -// Feedbacks which show an increasing delay cause the estimation to be reduced. -TEST_F(GoogCcNetworkControllerTest, UpdatesDelayBasedEstimate) { - TargetBitrateTrackingSetup(); - const int64_t kRunTimeMs = 6000; - - // The test must run and insert packets/feedback long enough that the - // BWE computes a valid estimate. This is first done in an environment which - // simulates no bandwidth limitation, and therefore not built-up delay. - PacketTransmissionAndFeedbackBlock(kRunTimeMs, 0); - ASSERT_TRUE(target_bitrate_.has_value()); - - // Repeat, but this time with a building delay, and make sure that the - // estimation is adjusted downwards. - DataRate bitrate_before_delay = *target_bitrate_; - PacketTransmissionAndFeedbackBlock(kRunTimeMs, 50); - EXPECT_LT(*target_bitrate_, bitrate_before_delay); -} - -TEST_F(GoogCcNetworkControllerTest, - PaddingRateLimitedByCongestionWindowInTrial) { +TEST(GoogCcScenario, PaddingRateLimitedByCongestionWindowInTrial) { ScopedFieldTrials trial( "WebRTC-CongestionWindow/QueueSize:200,MinBitrate:30000/"); @@ -447,7 +513,7 @@ TEST_F(GoogCcNetworkControllerTest, EXPECT_NEAR(client->padding_rate().kbps(), client->target_rate().kbps(), 1); } -TEST_F(GoogCcNetworkControllerTest, LimitsToFloorIfRttIsHighInTrial) { +TEST(GoogCcScenario, LimitsToFloorIfRttIsHighInTrial) { // The field trial limits maximum RTT to 2 seconds, higher RTT means that the // controller backs off until it reaches the minimum configured bitrate. 
This // allows the RTT to recover faster than the regular control mechanism would @@ -484,11 +550,11 @@ TEST_F(GoogCcNetworkControllerTest, LimitsToFloorIfRttIsHighInTrial) { EXPECT_NEAR(client->target_rate().kbps(), kBandwidthFloor.kbps(), 5); } -TEST_F(GoogCcNetworkControllerTest, UpdatesTargetRateBasedOnLinkCapacity) { +TEST(GoogCcScenario, UpdatesTargetRateBasedOnLinkCapacity) { UpdatesTargetRateBasedOnLinkCapacity(); } -TEST_F(GoogCcNetworkControllerTest, StableEstimateDoesNotVaryInSteadyState) { +TEST(GoogCcScenario, StableEstimateDoesNotVaryInSteadyState) { auto factory = CreateFeedbackOnlyFactory(); Scenario s("googcc_unit/stable_target", false); CallClientConfig config; @@ -525,15 +591,13 @@ TEST_F(GoogCcNetworkControllerTest, StableEstimateDoesNotVaryInSteadyState) { EXPECT_GE(min_stable_target / max_stable_target, min_target / max_target); } -TEST_F(GoogCcNetworkControllerTest, - LossBasedControlUpdatesTargetRateBasedOnLinkCapacity) { +TEST(GoogCcScenario, LossBasedControlUpdatesTargetRateBasedOnLinkCapacity) { ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/"); // TODO(srte): Should the behavior be unaffected at low loss rates? UpdatesTargetRateBasedOnLinkCapacity("_loss_based"); } -TEST_F(GoogCcNetworkControllerTest, - LossBasedControlDoesModestBackoffToHighLoss) { +TEST(GoogCcScenario, LossBasedControlDoesModestBackoffToHighLoss) { ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/"); Scenario s("googcc_unit/high_loss_channel", false); CallClientConfig config; @@ -600,8 +664,7 @@ DataRate AverageBitrateAfterCrossInducedLoss(std::string name) { s.TimeSinceStart(); } -TEST_F(GoogCcNetworkControllerTest, - LossBasedRecoversFasterAfterCrossInducedLoss) { +TEST(GoogCcScenario, LossBasedRecoversFasterAfterCrossInducedLoss) { // This test acts as a reference for the test below, showing that without the // trial, we have worse behavior. DataRate average_bitrate_without_loss_based = @@ -610,7 +673,6 @@ TEST_F(GoogCcNetworkControllerTest, // We recover bitrate better when subject to loss spikes from cross traffic // when loss based controller is used. 
ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/"); - SetUp(); DataRate average_bitrate_with_loss_based = AverageBitrateAfterCrossInducedLoss("googcc_unit/cross_loss_based"); @@ -618,7 +680,7 @@ TEST_F(GoogCcNetworkControllerTest, average_bitrate_without_loss_based * 1.1); } -TEST_F(GoogCcNetworkControllerTest, LossBasedEstimatorCapsRateAtModerateLoss) { +TEST(GoogCcScenario, LossBasedEstimatorCapsRateAtModerateLoss) { ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/"); Scenario s("googcc_unit/moderate_loss_channel", false); CallClientConfig config; @@ -651,7 +713,7 @@ TEST_F(GoogCcNetworkControllerTest, LossBasedEstimatorCapsRateAtModerateLoss) { EXPECT_LT(client->target_rate().kbps(), 2500); } -TEST_F(GoogCcNetworkControllerTest, MaintainsLowRateInSafeResetTrial) { +TEST(GoogCcScenario, MaintainsLowRateInSafeResetTrial) { const DataRate kLinkCapacity = DataRate::KilobitsPerSec(200); const DataRate kStartRate = DataRate::KilobitsPerSec(300); @@ -678,7 +740,7 @@ TEST_F(GoogCcNetworkControllerTest, MaintainsLowRateInSafeResetTrial) { EXPECT_NEAR(client->send_bandwidth().kbps(), kLinkCapacity.kbps(), 50); } -TEST_F(GoogCcNetworkControllerTest, CutsHighRateInSafeResetTrial) { +TEST(GoogCcScenario, CutsHighRateInSafeResetTrial) { const DataRate kLinkCapacity = DataRate::KilobitsPerSec(1000); const DataRate kStartRate = DataRate::KilobitsPerSec(300); @@ -705,7 +767,7 @@ TEST_F(GoogCcNetworkControllerTest, CutsHighRateInSafeResetTrial) { EXPECT_NEAR(client->send_bandwidth().kbps(), kStartRate.kbps(), 30); } -TEST_F(GoogCcNetworkControllerTest, DetectsHighRateInSafeResetTrial) { +TEST(GoogCcScenario, DetectsHighRateInSafeResetTrial) { ScopedFieldTrials trial( "WebRTC-Bwe-SafeResetOnRouteChange/Enabled,ack/" "WebRTC-SendSideBwe-WithOverhead/Enabled/"); @@ -745,8 +807,7 @@ TEST_F(GoogCcNetworkControllerTest, DetectsHighRateInSafeResetTrial) { EXPECT_GT(client->send_bandwidth().kbps(), kNewLinkCapacity.kbps() - 300); } -TEST_F(GoogCcNetworkControllerTest, - TargetRateReducedOnPacingBufferBuildupInTrial) { +TEST(GoogCcScenario, TargetRateReducedOnPacingBufferBuildupInTrial) { // Configure strict pacing to ensure build-up. 
ScopedFieldTrials trial( "WebRTC-CongestionWindow/QueueSize:100,MinBitrate:30000/" @@ -775,7 +836,7 @@ TEST_F(GoogCcNetworkControllerTest, EXPECT_LT(client->GetStats().pacer_delay_ms, 150); } -TEST_F(GoogCcNetworkControllerTest, NoBandwidthTogglingInLossControlTrial) { +TEST(GoogCcScenario, NoBandwidthTogglingInLossControlTrial) { ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/"); Scenario s("googcc_unit/no_toggling"); auto* send_net = s.CreateSimulationNode([&](NetworkSimulationConfig* c) { @@ -809,7 +870,7 @@ TEST_F(GoogCcNetworkControllerTest, NoBandwidthTogglingInLossControlTrial) { } } -TEST_F(GoogCcNetworkControllerTest, NoRttBackoffCollapseWhenVideoStops) { +TEST(GoogCcScenario, NoRttBackoffCollapseWhenVideoStops) { ScopedFieldTrials trial("WebRTC-Bwe-MaxRttLimit/limit:2s/"); Scenario s("googcc_unit/rttbackoff_video_stop"); auto* send_net = s.CreateSimulationNode([&](NetworkSimulationConfig* c) { @@ -831,7 +892,7 @@ TEST_F(GoogCcNetworkControllerTest, NoRttBackoffCollapseWhenVideoStops) { EXPECT_GT(client->send_bandwidth().kbps(), 1000); } -TEST_F(GoogCcNetworkControllerTest, NoCrashOnVeryLateFeedback) { +TEST(GoogCcScenario, NoCrashOnVeryLateFeedback) { Scenario s; auto ret_net = s.CreateMutableSimulationNode(NetworkSimulationConfig()); auto* route = s.CreateRoutes( @@ -855,7 +916,7 @@ TEST_F(GoogCcNetworkControllerTest, NoCrashOnVeryLateFeedback) { s.RunFor(TimeDelta::Seconds(2)); } -TEST_F(GoogCcNetworkControllerTest, IsFairToTCP) { +TEST(GoogCcScenario, IsFairToTCP) { Scenario s("googcc_unit/tcp_fairness"); NetworkSimulationConfig net_conf; net_conf.bandwidth = DataRate::KilobitsPerSec(1000); @@ -879,16 +940,16 @@ TEST_F(GoogCcNetworkControllerTest, IsFairToTCP) { EXPECT_LT(client->send_bandwidth().kbps(), 750); } -TEST(GoogCcScenario, FastRampupOnRembCapLiftedWithFieldTrial) { - ScopedFieldTrials trial("WebRTC-Bwe-ReceiverLimitCapsOnly/Enabled/"); +TEST(GoogCcScenario, FastRampupOnRembCapLifted) { DataRate final_estimate = - RunRembDipScenario("googcc_unit/fast_rampup_on_remb_cap_lifted"); + RunRembDipScenario("googcc_unit/default_fast_rampup_on_remb_cap_lifted"); EXPECT_GT(final_estimate.kbps(), 1500); } -TEST(GoogCcScenario, SlowRampupOnRembCapLifted) { +TEST(GoogCcScenario, SlowRampupOnRembCapLiftedWithFieldTrial) { + ScopedFieldTrials trial("WebRTC-Bwe-ReceiverLimitCapsOnly/Disabled/"); DataRate final_estimate = - RunRembDipScenario("googcc_unit/default_slow_rampup_on_remb_cap_lifted"); + RunRembDipScenario("googcc_unit/legacy_slow_rampup_on_remb_cap_lifted"); EXPECT_LT(final_estimate.kbps(), 1000); } diff --git a/modules/congestion_controller/goog_cc/inter_arrival_delta.cc b/modules/congestion_controller/goog_cc/inter_arrival_delta.cc index 791867db67..ecd5742272 100644 --- a/modules/congestion_controller/goog_cc/inter_arrival_delta.cc +++ b/modules/congestion_controller/goog_cc/inter_arrival_delta.cc @@ -102,8 +102,8 @@ bool InterArrivalDelta::ComputeDeltas(Timestamp send_time, return calculated_deltas; } -// Assumes that |timestamp| is not reordered compared to -// |current_timestamp_group_|. +// Assumes that `timestamp` is not reordered compared to +// `current_timestamp_group_`. 
bool InterArrivalDelta::NewTimestampGroup(Timestamp arrival_time, Timestamp send_time) const { if (current_timestamp_group_.IsFirstPacket()) { diff --git a/modules/congestion_controller/goog_cc/inter_arrival_delta.h b/modules/congestion_controller/goog_cc/inter_arrival_delta.h index 28dc806249..0617e34cdd 100644 --- a/modules/congestion_controller/goog_cc/inter_arrival_delta.h +++ b/modules/congestion_controller/goog_cc/inter_arrival_delta.h @@ -38,12 +38,12 @@ class InterArrivalDelta { // This function returns true if a delta was computed, or false if the current // group is still incomplete or if only one group has been completed. - // |send_time| is the send time. - // |arrival_time| is the time at which the packet arrived. - // |packet_size| is the size of the packet. - // |timestamp_delta| (output) is the computed send time delta. - // |arrival_time_delta_ms| (output) is the computed arrival-time delta. - // |packet_size_delta| (output) is the computed size delta. + // `send_time` is the send time. + // `arrival_time` is the time at which the packet arrived. + // `packet_size` is the size of the packet. + // `timestamp_delta` (output) is the computed send time delta. + // `arrival_time_delta_ms` (output) is the computed arrival-time delta. + // `packet_size_delta` (output) is the computed size delta. bool ComputeDeltas(Timestamp send_time, Timestamp arrival_time, Timestamp system_time, @@ -73,7 +73,7 @@ class InterArrivalDelta { }; // Returns true if the last packet was the end of the current batch and the - // packet with |send_time| is the first of a new batch. + // packet with `send_time` is the first of a new batch. bool NewTimestampGroup(Timestamp arrival_time, Timestamp send_time) const; bool BelongsToBurst(Timestamp arrival_time, Timestamp send_time) const; diff --git a/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc b/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc index c7f53c62f2..33974dc900 100644 --- a/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc +++ b/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc @@ -36,7 +36,7 @@ double GetIncreaseFactor(const LossBasedControlConfig& config, TimeDelta rtt) { } auto rtt_range = config.increase_high_rtt.Get() - config.increase_low_rtt; if (rtt_range <= TimeDelta::Zero()) { - RTC_NOTREACHED(); // Only on misconfiguration. + RTC_DCHECK_NOTREACHED(); // Only on misconfiguration. return config.min_increase_factor; } auto rtt_offset = rtt - config.increase_low_rtt; @@ -57,7 +57,7 @@ DataRate BitrateFromLoss(double loss, DataRate loss_bandwidth_balance, double exponent) { if (exponent <= 0) { - RTC_NOTREACHED(); + RTC_DCHECK_NOTREACHED(); return DataRate::Infinity(); } if (loss < 1e-5) @@ -69,7 +69,7 @@ double ExponentialUpdate(TimeDelta window, TimeDelta interval) { // Use the convention that exponential window length (which is really // infinite) is the time it takes to dampen to 1/e. 
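// In other words, the per-update smoothing factor is
// 1 - exp(-interval / window); for example, window = 800 ms and
// interval = 50 ms give 1 - exp(-50 / 800) ~= 0.061 per update.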
  if (window <= TimeDelta::Zero()) {
-    RTC_NOTREACHED();
+    RTC_DCHECK_NOTREACHED();
     return 1.0f;
   }
   return 1.0f - exp(interval / window * -1.0);
 }
@@ -134,7 +134,7 @@ void LossBasedBandwidthEstimation::UpdateLossStatistics(
     const std::vector<PacketResult>& packet_results,
     Timestamp at_time) {
   if (packet_results.empty()) {
-    RTC_NOTREACHED();
+    RTC_DCHECK_NOTREACHED();
     return;
   }
   int loss_count = 0;
diff --git a/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc b/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc
new file mode 100644
index 0000000000..44041143bf
--- /dev/null
+++ b/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc
@@ -0,0 +1,755 @@
+/*
+ * Copyright 2021 The WebRTC project authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/goog_cc/loss_based_bwe_v2.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <cstdlib>
+#include <limits>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/container.h"
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "api/transport/network_types.h"
+#include "api/transport/webrtc_key_value_config.h"
+#include "api/units/data_rate.h"
+#include "api/units/data_size.h"
+#include "api/units/time_delta.h"
+#include "api/units/timestamp.h"
+#include "rtc_base/experiments/field_trial_list.h"
+#include "rtc_base/experiments/field_trial_parser.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+bool IsValid(DataRate datarate) {
+  return datarate.IsFinite();
+}
+
+bool IsValid(Timestamp timestamp) {
+  return timestamp.IsFinite();
+}
+
+struct PacketResultsSummary {
+  int num_packets = 0;
+  int num_lost_packets = 0;
+  DataSize total_size = DataSize::Zero();
+  Timestamp first_send_time = Timestamp::PlusInfinity();
+  Timestamp last_send_time = Timestamp::MinusInfinity();
+};
+
+// Returns a `PacketResultsSummary` where `first_send_time` is `PlusInfinity`,
+// and `last_send_time` is `MinusInfinity`, if `packet_results` is empty.
+PacketResultsSummary GetPacketResultsSummary(
+    rtc::ArrayView<const PacketResult> packet_results) {
+  PacketResultsSummary packet_results_summary;
+
+  packet_results_summary.num_packets = packet_results.size();
+  for (const PacketResult& packet : packet_results) {
+    if (!packet.IsReceived()) {
+      packet_results_summary.num_lost_packets++;
+    }
+    packet_results_summary.total_size += packet.sent_packet.size;
+    packet_results_summary.first_send_time = std::min(
+        packet_results_summary.first_send_time, packet.sent_packet.send_time);
+    packet_results_summary.last_send_time = std::max(
+        packet_results_summary.last_send_time, packet.sent_packet.send_time);
+  }
+
+  return packet_results_summary;
+}
+
+double GetLossProbability(double inherent_loss,
+                          DataRate loss_limited_bandwidth,
+                          DataRate sending_rate) {
+  if (inherent_loss < 0.0 || inherent_loss > 1.0) {
+    RTC_LOG(LS_WARNING) << "The inherent loss must be in [0,1]: "
+                        << inherent_loss;
+    inherent_loss = std::min(std::max(inherent_loss, 0.0), 1.0);
+  }
+  if (!sending_rate.IsFinite()) {
+    RTC_LOG(LS_WARNING) << "The sending rate must be finite: "
+                        << ToString(sending_rate);
+  }
+  if (!loss_limited_bandwidth.IsFinite()) {
+    RTC_LOG(LS_WARNING) << "The loss limited bandwidth must be finite: "
+                        << ToString(loss_limited_bandwidth);
+  }
+
+  // We approximate the loss model
+  //   loss_probability = inherent_loss + (1 - inherent_loss) *
+  //                      max(0, sending_rate - bandwidth) / sending_rate
+  // by
+  //   loss_probability = inherent_loss +
+  //                      max(0, sending_rate - bandwidth) / sending_rate
+  // as it allows for simpler calculations and makes little difference in
+  // practice.
+  double loss_probability = inherent_loss;
+  if (IsValid(sending_rate) && IsValid(loss_limited_bandwidth) &&
+      (sending_rate > loss_limited_bandwidth)) {
+    loss_probability += (sending_rate - loss_limited_bandwidth) / sending_rate;
+  }
+  return std::min(std::max(loss_probability, 1.0e-6), 1.0 - 1.0e-6);
+}
+
+}  // namespace
+
+LossBasedBweV2::LossBasedBweV2(const WebRtcKeyValueConfig* key_value_config)
+    : config_(CreateConfig(key_value_config)) {
+  if (!config_.has_value()) {
+    RTC_LOG(LS_VERBOSE) << "The configuration does not specify that the "
+                           "estimator should be enabled, disabling it.";
+    return;
+  }
+  if (!IsConfigValid()) {
+    RTC_LOG(LS_WARNING)
+        << "The configuration is not valid, disabling the estimator.";
+    config_.reset();
+    return;
+  }
+
+  current_estimate_.inherent_loss = config_->initial_inherent_loss_estimate;
+  observations_.resize(config_->observation_window_size);
+  temporal_weights_.resize(config_->observation_window_size);
+  instant_upper_bound_temporal_weights_.resize(
+      config_->observation_window_size);
+  CalculateTemporalWeights();
+}
+
+bool LossBasedBweV2::IsEnabled() const {
+  return config_.has_value();
+}
+
+bool LossBasedBweV2::IsReady() const {
+  return IsEnabled() && IsValid(current_estimate_.loss_limited_bandwidth) &&
+         num_observations_ > 0;
+}
+
+DataRate LossBasedBweV2::GetBandwidthEstimate() const {
+  if (!IsReady()) {
+    if (!IsEnabled()) {
+      RTC_LOG(LS_WARNING)
+          << "The estimator must be enabled before it can be used.";
+    } else {
+      if (!IsValid(current_estimate_.loss_limited_bandwidth)) {
+        RTC_LOG(LS_WARNING)
+            << "The estimator must be initialized before it can be used.";
+      }
+      if (num_observations_ <= 0) {
+        RTC_LOG(LS_WARNING) << "The estimator must receive enough loss "
+                               "statistics before it can be used.";
+      }
+    }
+    return DataRate::PlusInfinity();
+  }
+
+  return std::min(current_estimate_.loss_limited_bandwidth,
+                  GetInstantUpperBound());
+}
+
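// A minimal usage sketch of the class, assuming the caller owns the
// surrounding state (caller-side names here are hypothetical; see also
// UpdateBandwidthEstimate below):
//
//   LossBasedBweV2 loss_bwe(&key_value_config);
//   if (loss_bwe.IsEnabled()) {
//     loss_bwe.SetBandwidthEstimate(delay_based_estimate);   // initialize
//     loss_bwe.SetAcknowledgedBitrate(acknowledged_rate);
//     loss_bwe.UpdateBandwidthEstimate(packet_results, delay_based_estimate);
//     if (loss_bwe.IsReady()) {
//       DataRate estimate = loss_bwe.GetBandwidthEstimate();
//     }
//   }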
+void LossBasedBweV2::SetAcknowledgedBitrate(DataRate acknowledged_bitrate) {
+  if (IsValid(acknowledged_bitrate)) {
+    acknowledged_bitrate_ = acknowledged_bitrate;
+  } else {
+    RTC_LOG(LS_WARNING) << "The acknowledged bitrate must be finite: "
+                        << ToString(acknowledged_bitrate);
+  }
+}
+
+void LossBasedBweV2::SetBandwidthEstimate(DataRate bandwidth_estimate) {
+  if (IsValid(bandwidth_estimate)) {
+    current_estimate_.loss_limited_bandwidth = bandwidth_estimate;
+  } else {
+    RTC_LOG(LS_WARNING) << "The bandwidth estimate must be finite: "
+                        << ToString(bandwidth_estimate);
+  }
+}
+
+void LossBasedBweV2::UpdateBandwidthEstimate(
+    rtc::ArrayView<const PacketResult> packet_results,
+    DataRate delay_based_estimate) {
+  if (!IsEnabled()) {
+    RTC_LOG(LS_WARNING)
+        << "The estimator must be enabled before it can be used.";
+    return;
+  }
+  if (packet_results.empty()) {
+    RTC_LOG(LS_VERBOSE)
+        << "The estimate cannot be updated without any loss statistics.";
+    return;
+  }
+
+  if (!PushBackObservation(packet_results)) {
+    return;
+  }
+
+  if (!IsValid(current_estimate_.loss_limited_bandwidth)) {
+    RTC_LOG(LS_VERBOSE)
+        << "The estimator must be initialized before it can be used.";
+    return;
+  }
+
+  ChannelParameters best_candidate = current_estimate_;
+  double objective_max = std::numeric_limits<double>::lowest();
+  for (ChannelParameters candidate : GetCandidates(delay_based_estimate)) {
+    NewtonsMethodUpdate(candidate);
+
+    const double candidate_objective = GetObjective(candidate);
+    if (candidate_objective > objective_max) {
+      objective_max = candidate_objective;
+      best_candidate = candidate;
+    }
+  }
+  if (best_candidate.loss_limited_bandwidth <
+      current_estimate_.loss_limited_bandwidth) {
+    last_time_estimate_reduced_ = last_send_time_most_recent_observation_;
+  }
+  current_estimate_ = best_candidate;
+}
+
+// Returns a `LossBasedBweV2::Config` iff the `key_value_config` specifies a
+// configuration for the `LossBasedBweV2` which is explicitly enabled.
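// For reference, the parameters below are read from the trial string with
// ParseFieldTrial, so (assuming the usual comma-separated key:value syntax,
// with `|`-separated lists) an enabling configuration could look like:
//
//   WebRTC-Bwe-LossBasedBweV2/Enabled:true,CandidateFactors:1.1|1.0|0.9/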
+absl::optional<LossBasedBweV2::Config> LossBasedBweV2::CreateConfig(
+    const WebRtcKeyValueConfig* key_value_config) {
+  FieldTrialParameter<bool> enabled("Enabled", false);
+  FieldTrialParameter<double> bandwidth_rampup_upper_bound_factor(
+      "BwRampupUpperBoundFactor", 1.1);
+  FieldTrialParameter<double> rampup_acceleration_max_factor(
+      "BwRampupAccelMaxFactor", 0.0);
+  FieldTrialParameter<TimeDelta> rampup_acceleration_maxout_time(
+      "BwRampupAccelMaxoutTime", TimeDelta::Seconds(60));
+  FieldTrialList<double> candidate_factors("CandidateFactors",
+                                           {1.05, 1.0, 0.95});
+  FieldTrialParameter<double> higher_bandwidth_bias_factor(
+      "HigherBwBiasFactor", 0.00001);
+  FieldTrialParameter<double> higher_log_bandwidth_bias_factor(
+      "HigherLogBwBiasFactor", 0.001);
+  FieldTrialParameter<double> inherent_loss_lower_bound(
+      "InherentLossLowerBound", 1.0e-3);
+  FieldTrialParameter<DataRate> inherent_loss_upper_bound_bandwidth_balance(
+      "InherentLossUpperBoundBwBalance", DataRate::KilobitsPerSec(15.0));
+  FieldTrialParameter<double> inherent_loss_upper_bound_offset(
+      "InherentLossUpperBoundOffset", 0.05);
+  FieldTrialParameter<double> initial_inherent_loss_estimate(
+      "InitialInherentLossEstimate", 0.01);
+  FieldTrialParameter<int> newton_iterations("NewtonIterations", 1);
+  FieldTrialParameter<double> newton_step_size("NewtonStepSize", 0.5);
+  FieldTrialParameter<bool> append_acknowledged_rate_candidate(
+      "AckedRateCandidate", true);
+  FieldTrialParameter<bool> append_delay_based_estimate_candidate(
+      "DelayBasedCandidate", false);
+  FieldTrialParameter<TimeDelta> observation_duration_lower_bound(
+      "ObservationDurationLowerBound", TimeDelta::Seconds(1));
+  FieldTrialParameter<int> observation_window_size("ObservationWindowSize",
+                                                   20);
+  FieldTrialParameter<double> sending_rate_smoothing_factor(
+      "SendingRateSmoothingFactor", 0.0);
+  FieldTrialParameter<double> instant_upper_bound_temporal_weight_factor(
+      "InstantUpperBoundTemporalWeightFactor", 0.99);
+  FieldTrialParameter<DataRate> instant_upper_bound_bandwidth_balance(
+      "InstantUpperBoundBwBalance", DataRate::KilobitsPerSec(15.0));
+  FieldTrialParameter<double> instant_upper_bound_loss_offset(
+      "InstantUpperBoundLossOffset", 0.05);
+  FieldTrialParameter<double> temporal_weight_factor("TemporalWeightFactor",
+                                                     0.99);
+
+  if (key_value_config) {
+    ParseFieldTrial({&enabled,
+                     &bandwidth_rampup_upper_bound_factor,
+                     &rampup_acceleration_max_factor,
+                     &rampup_acceleration_maxout_time,
+                     &candidate_factors,
+                     &higher_bandwidth_bias_factor,
+                     &higher_log_bandwidth_bias_factor,
+                     &inherent_loss_lower_bound,
+                     &inherent_loss_upper_bound_bandwidth_balance,
+                     &inherent_loss_upper_bound_offset,
+                     &initial_inherent_loss_estimate,
+                     &newton_iterations,
+                     &newton_step_size,
+                     &append_acknowledged_rate_candidate,
+                     &append_delay_based_estimate_candidate,
+                     &observation_duration_lower_bound,
+                     &observation_window_size,
+                     &sending_rate_smoothing_factor,
+                     &instant_upper_bound_temporal_weight_factor,
+                     &instant_upper_bound_bandwidth_balance,
+                     &instant_upper_bound_loss_offset,
+                     &temporal_weight_factor},
+                    key_value_config->Lookup("WebRTC-Bwe-LossBasedBweV2"));
+  }
+
+  absl::optional<Config> config;
+  if (!enabled.Get()) {
+    return config;
+  }
+  config.emplace();
+  config->bandwidth_rampup_upper_bound_factor =
+      bandwidth_rampup_upper_bound_factor.Get();
+  config->rampup_acceleration_max_factor =
+      rampup_acceleration_max_factor.Get();
+  config->rampup_acceleration_maxout_time =
+      rampup_acceleration_maxout_time.Get();
+  config->candidate_factors = candidate_factors.Get();
+  config->higher_bandwidth_bias_factor = higher_bandwidth_bias_factor.Get();
+  config->higher_log_bandwidth_bias_factor =
+      higher_log_bandwidth_bias_factor.Get();
+
config->inherent_loss_lower_bound = inherent_loss_lower_bound.Get(); + config->inherent_loss_upper_bound_bandwidth_balance = + inherent_loss_upper_bound_bandwidth_balance.Get(); + config->inherent_loss_upper_bound_offset = + inherent_loss_upper_bound_offset.Get(); + config->initial_inherent_loss_estimate = initial_inherent_loss_estimate.Get(); + config->newton_iterations = newton_iterations.Get(); + config->newton_step_size = newton_step_size.Get(); + config->append_acknowledged_rate_candidate = + append_acknowledged_rate_candidate.Get(); + config->append_delay_based_estimate_candidate = + append_delay_based_estimate_candidate.Get(); + config->observation_duration_lower_bound = + observation_duration_lower_bound.Get(); + config->observation_window_size = observation_window_size.Get(); + config->sending_rate_smoothing_factor = sending_rate_smoothing_factor.Get(); + config->instant_upper_bound_temporal_weight_factor = + instant_upper_bound_temporal_weight_factor.Get(); + config->instant_upper_bound_bandwidth_balance = + instant_upper_bound_bandwidth_balance.Get(); + config->instant_upper_bound_loss_offset = + instant_upper_bound_loss_offset.Get(); + config->temporal_weight_factor = temporal_weight_factor.Get(); + return config; +} + +bool LossBasedBweV2::IsConfigValid() const { + if (!config_.has_value()) { + return false; + } + + bool valid = true; + + if (config_->bandwidth_rampup_upper_bound_factor <= 1.0) { + RTC_LOG(LS_WARNING) + << "The bandwidth rampup upper bound factor must be greater than 1: " + << config_->bandwidth_rampup_upper_bound_factor; + valid = false; + } + if (config_->rampup_acceleration_max_factor < 0.0) { + RTC_LOG(LS_WARNING) + << "The rampup acceleration max factor must be non-negative.: " + << config_->rampup_acceleration_max_factor; + valid = false; + } + if (config_->rampup_acceleration_maxout_time <= TimeDelta::Zero()) { + RTC_LOG(LS_WARNING) + << "The rampup acceleration maxout time must be above zero: " + << config_->rampup_acceleration_maxout_time.seconds(); + valid = false; + } + for (double candidate_factor : config_->candidate_factors) { + if (candidate_factor <= 0.0) { + RTC_LOG(LS_WARNING) << "All candidate factors must be greater than zero: " + << candidate_factor; + valid = false; + } + } + + // Ensure that the configuration allows generation of at least one candidate + // other than the current estimate. + if (!config_->append_acknowledged_rate_candidate && + !config_->append_delay_based_estimate_candidate && + !absl::c_any_of(config_->candidate_factors, + [](double cf) { return cf != 1.0; })) { + RTC_LOG(LS_WARNING) + << "The configuration does not allow generating candidates. 
Specify " + "a candidate factor other than 1.0, allow the acknowledged rate " + "to be a candidate, and/or allow the delay based estimate to be a " + "candidate."; + valid = false; + } + + if (config_->higher_bandwidth_bias_factor < 0.0) { + RTC_LOG(LS_WARNING) + << "The higher bandwidth bias factor must be non-negative: " + << config_->higher_bandwidth_bias_factor; + valid = false; + } + if (config_->inherent_loss_lower_bound < 0.0 || + config_->inherent_loss_lower_bound >= 1.0) { + RTC_LOG(LS_WARNING) << "The inherent loss lower bound must be in [0, 1): " + << config_->inherent_loss_lower_bound; + valid = false; + } + if (config_->inherent_loss_upper_bound_bandwidth_balance <= + DataRate::Zero()) { + RTC_LOG(LS_WARNING) + << "The inherent loss upper bound bandwidth balance " + "must be positive: " + << ToString(config_->inherent_loss_upper_bound_bandwidth_balance); + valid = false; + } + if (config_->inherent_loss_upper_bound_offset < + config_->inherent_loss_lower_bound || + config_->inherent_loss_upper_bound_offset >= 1.0) { + RTC_LOG(LS_WARNING) << "The inherent loss upper bound must be greater " + "than or equal to the inherent " + "loss lower bound, which is " + << config_->inherent_loss_lower_bound + << ", and less than 1: " + << config_->inherent_loss_upper_bound_offset; + valid = false; + } + if (config_->initial_inherent_loss_estimate < 0.0 || + config_->initial_inherent_loss_estimate >= 1.0) { + RTC_LOG(LS_WARNING) + << "The initial inherent loss estimate must be in [0, 1): " + << config_->initial_inherent_loss_estimate; + valid = false; + } + if (config_->newton_iterations <= 0) { + RTC_LOG(LS_WARNING) << "The number of Newton iterations must be positive: " + << config_->newton_iterations; + valid = false; + } + if (config_->newton_step_size <= 0.0) { + RTC_LOG(LS_WARNING) << "The Newton step size must be positive: " + << config_->newton_step_size; + valid = false; + } + if (config_->observation_duration_lower_bound <= TimeDelta::Zero()) { + RTC_LOG(LS_WARNING) + << "The observation duration lower bound must be positive: " + << ToString(config_->observation_duration_lower_bound); + valid = false; + } + if (config_->observation_window_size < 2) { + RTC_LOG(LS_WARNING) << "The observation window size must be at least 2: " + << config_->observation_window_size; + valid = false; + } + if (config_->sending_rate_smoothing_factor < 0.0 || + config_->sending_rate_smoothing_factor >= 1.0) { + RTC_LOG(LS_WARNING) + << "The sending rate smoothing factor must be in [0, 1): " + << config_->sending_rate_smoothing_factor; + valid = false; + } + if (config_->instant_upper_bound_temporal_weight_factor <= 0.0 || + config_->instant_upper_bound_temporal_weight_factor > 1.0) { + RTC_LOG(LS_WARNING) + << "The instant upper bound temporal weight factor must be in (0, 1]" + << config_->instant_upper_bound_temporal_weight_factor; + valid = false; + } + if (config_->instant_upper_bound_bandwidth_balance <= DataRate::Zero()) { + RTC_LOG(LS_WARNING) + << "The instant upper bound bandwidth balance must be positive: " + << ToString(config_->instant_upper_bound_bandwidth_balance); + valid = false; + } + if (config_->instant_upper_bound_loss_offset < 0.0 || + config_->instant_upper_bound_loss_offset >= 1.0) { + RTC_LOG(LS_WARNING) + << "The instant upper bound loss offset must be in [0, 1): " + << config_->instant_upper_bound_loss_offset; + valid = false; + } + if (config_->temporal_weight_factor <= 0.0 || + config_->temporal_weight_factor > 1.0) { + RTC_LOG(LS_WARNING) << "The temporal weight factor must be 
in (0, 1]: " + << config_->temporal_weight_factor; + valid = false; + } + + return valid; +} + +double LossBasedBweV2::GetAverageReportedLossRatio() const { + if (num_observations_ <= 0) { + return 0.0; + } + + int num_packets = 0; + int num_lost_packets = 0; + for (const Observation& observation : observations_) { + if (!observation.IsInitialized()) { + continue; + } + + double instant_temporal_weight = + instant_upper_bound_temporal_weights_[(num_observations_ - 1) - + observation.id]; + num_packets += instant_temporal_weight * observation.num_packets; + num_lost_packets += instant_temporal_weight * observation.num_lost_packets; + } + + return static_cast(num_lost_packets) / num_packets; +} + +DataRate LossBasedBweV2::GetCandidateBandwidthUpperBound() const { + if (!acknowledged_bitrate_.has_value()) + return DataRate::PlusInfinity(); + + DataRate candidate_bandwidth_upper_bound = + config_->bandwidth_rampup_upper_bound_factor * (*acknowledged_bitrate_); + + if (config_->rampup_acceleration_max_factor > 0.0) { + const TimeDelta time_since_bandwidth_reduced = std::min( + config_->rampup_acceleration_maxout_time, + std::max(TimeDelta::Zero(), last_send_time_most_recent_observation_ - + last_time_estimate_reduced_)); + const double rampup_acceleration = config_->rampup_acceleration_max_factor * + time_since_bandwidth_reduced / + config_->rampup_acceleration_maxout_time; + + candidate_bandwidth_upper_bound += + rampup_acceleration * (*acknowledged_bitrate_); + } + return candidate_bandwidth_upper_bound; +} + +std::vector LossBasedBweV2::GetCandidates( + DataRate delay_based_estimate) const { + std::vector bandwidths; + for (double candidate_factor : config_->candidate_factors) { + bandwidths.push_back(candidate_factor * + current_estimate_.loss_limited_bandwidth); + } + + if (acknowledged_bitrate_.has_value() && + config_->append_acknowledged_rate_candidate) { + bandwidths.push_back(*acknowledged_bitrate_); + } + + if (IsValid(delay_based_estimate) && + config_->append_delay_based_estimate_candidate) { + bandwidths.push_back(delay_based_estimate); + } + + const DataRate candidate_bandwidth_upper_bound = + GetCandidateBandwidthUpperBound(); + + std::vector candidates; + candidates.resize(bandwidths.size()); + for (size_t i = 0; i < bandwidths.size(); ++i) { + ChannelParameters candidate = current_estimate_; + candidate.loss_limited_bandwidth = std::min( + bandwidths[i], std::max(current_estimate_.loss_limited_bandwidth, + candidate_bandwidth_upper_bound)); + candidate.inherent_loss = GetFeasibleInherentLoss(candidate); + candidates[i] = candidate; + } + return candidates; +} + +LossBasedBweV2::Derivatives LossBasedBweV2::GetDerivatives( + const ChannelParameters& channel_parameters) const { + Derivatives derivatives; + + for (const Observation& observation : observations_) { + if (!observation.IsInitialized()) { + continue; + } + + double loss_probability = GetLossProbability( + channel_parameters.inherent_loss, + channel_parameters.loss_limited_bandwidth, observation.sending_rate); + + double temporal_weight = + temporal_weights_[(num_observations_ - 1) - observation.id]; + + derivatives.first += + temporal_weight * + ((observation.num_lost_packets / loss_probability) - + (observation.num_received_packets / (1.0 - loss_probability))); + derivatives.second -= + temporal_weight * + ((observation.num_lost_packets / std::pow(loss_probability, 2)) + + (observation.num_received_packets / + std::pow(1.0 - loss_probability, 2))); + } + + if (derivatives.second >= 0.0) { + RTC_LOG(LS_ERROR) << "The 
second derivative is mathematically guaranteed " + "to be negative but is " + << derivatives.second << "."; + derivatives.second = -1.0e-6; + } + + return derivatives; +} + +double LossBasedBweV2::GetFeasibleInherentLoss( + const ChannelParameters& channel_parameters) const { + return std::min( + std::max(channel_parameters.inherent_loss, + config_->inherent_loss_lower_bound), + GetInherentLossUpperBound(channel_parameters.loss_limited_bandwidth)); +} + +double LossBasedBweV2::GetInherentLossUpperBound(DataRate bandwidth) const { + if (bandwidth.IsZero()) { + return 1.0; + } + + double inherent_loss_upper_bound = + config_->inherent_loss_upper_bound_offset + + config_->inherent_loss_upper_bound_bandwidth_balance / bandwidth; + return std::min(inherent_loss_upper_bound, 1.0); +} + +double LossBasedBweV2::GetHighBandwidthBias(DataRate bandwidth) const { + if (IsValid(bandwidth)) { + return config_->higher_bandwidth_bias_factor * bandwidth.kbps() + + config_->higher_log_bandwidth_bias_factor * + std::log(1.0 + bandwidth.kbps()); + } + return 0.0; +} + +double LossBasedBweV2::GetObjective( + const ChannelParameters& channel_parameters) const { + double objective = 0.0; + + const double high_bandwidth_bias = + GetHighBandwidthBias(channel_parameters.loss_limited_bandwidth); + + for (const Observation& observation : observations_) { + if (!observation.IsInitialized()) { + continue; + } + + double loss_probability = GetLossProbability( + channel_parameters.inherent_loss, + channel_parameters.loss_limited_bandwidth, observation.sending_rate); + + double temporal_weight = + temporal_weights_[(num_observations_ - 1) - observation.id]; + + objective += + temporal_weight * + ((observation.num_lost_packets * std::log(loss_probability)) + + (observation.num_received_packets * std::log(1.0 - loss_probability))); + objective += + temporal_weight * high_bandwidth_bias * observation.num_packets; + } + + return objective; +} + +DataRate LossBasedBweV2::GetSendingRate( + DataRate instantaneous_sending_rate) const { + if (num_observations_ <= 0) { + return instantaneous_sending_rate; + } + + const int most_recent_observation_idx = + (num_observations_ - 1) % config_->observation_window_size; + const Observation& most_recent_observation = + observations_[most_recent_observation_idx]; + DataRate sending_rate_previous_observation = + most_recent_observation.sending_rate; + + return config_->sending_rate_smoothing_factor * + sending_rate_previous_observation + + (1.0 - config_->sending_rate_smoothing_factor) * + instantaneous_sending_rate; +} + +DataRate LossBasedBweV2::GetInstantUpperBound() const { + return cached_instant_upper_bound_.value_or(DataRate::PlusInfinity()); +} + +void LossBasedBweV2::CalculateInstantUpperBound() { + DataRate instant_limit = DataRate::PlusInfinity(); + const double average_reported_loss_ratio = GetAverageReportedLossRatio(); + if (average_reported_loss_ratio > config_->instant_upper_bound_loss_offset) { + instant_limit = config_->instant_upper_bound_bandwidth_balance / + (average_reported_loss_ratio - + config_->instant_upper_bound_loss_offset); + } + cached_instant_upper_bound_ = instant_limit; +} + +void LossBasedBweV2::CalculateTemporalWeights() { + for (int i = 0; i < config_->observation_window_size; ++i) { + temporal_weights_[i] = std::pow(config_->temporal_weight_factor, i); + instant_upper_bound_temporal_weights_[i] = + std::pow(config_->instant_upper_bound_temporal_weight_factor, i); + } +} + +void LossBasedBweV2::NewtonsMethodUpdate( + ChannelParameters& channel_parameters) 
const { + if (num_observations_ <= 0) { + return; + } + + for (int i = 0; i < config_->newton_iterations; ++i) { + const Derivatives derivatives = GetDerivatives(channel_parameters); + channel_parameters.inherent_loss -= + config_->newton_step_size * derivatives.first / derivatives.second; + channel_parameters.inherent_loss = + GetFeasibleInherentLoss(channel_parameters); + } +} + +bool LossBasedBweV2::PushBackObservation( + rtc::ArrayView<const PacketResult> packet_results) { + if (packet_results.empty()) { + return false; + } + + PacketResultsSummary packet_results_summary = + GetPacketResultsSummary(packet_results); + + partial_observation_.num_packets += packet_results_summary.num_packets; + partial_observation_.num_lost_packets += + packet_results_summary.num_lost_packets; + partial_observation_.size += packet_results_summary.total_size; + + // This is the first packet report we have received. + if (!IsValid(last_send_time_most_recent_observation_)) { + last_send_time_most_recent_observation_ = + packet_results_summary.first_send_time; + } + + const Timestamp last_send_time = packet_results_summary.last_send_time; + const TimeDelta observation_duration = + last_send_time - last_send_time_most_recent_observation_; + + // Too small to be meaningful. + if (observation_duration < config_->observation_duration_lower_bound) { + return false; + } + + last_send_time_most_recent_observation_ = last_send_time; + + Observation observation; + observation.num_packets = partial_observation_.num_packets; + observation.num_lost_packets = partial_observation_.num_lost_packets; + observation.num_received_packets = + observation.num_packets - observation.num_lost_packets; + observation.sending_rate = + GetSendingRate(partial_observation_.size / observation_duration); + observation.id = num_observations_++; + observations_[observation.id % config_->observation_window_size] = + observation; + + partial_observation_ = PartialObservation(); + + CalculateInstantUpperBound(); + return true; +} + +} // namespace webrtc diff --git a/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h b/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h new file mode 100644 index 0000000000..f764892b55 --- /dev/null +++ b/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h @@ -0,0 +1,146 @@ +/* + * Copyright 2021 The WebRTC project authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_LOSS_BASED_BWE_V2_H_ +#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_LOSS_BASED_BWE_V2_H_ + +#include <cstddef> +#include <vector> + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "api/transport/network_types.h" +#include "api/transport/webrtc_key_value_config.h" +#include "api/units/data_rate.h" +#include "api/units/data_size.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" + +namespace webrtc { + +class LossBasedBweV2 { + public: + // Creates a disabled `LossBasedBweV2` if the + // `key_value_config` is not valid. 
+ explicit LossBasedBweV2(const WebRtcKeyValueConfig* key_value_config); + + LossBasedBweV2(const LossBasedBweV2&) = delete; + LossBasedBweV2& operator=(const LossBasedBweV2&) = delete; + + ~LossBasedBweV2() = default; + + bool IsEnabled() const; + // Returns true iff a BWE can be calculated, i.e., the estimator has been + // initialized with a BWE and then has received enough `PacketResult`s. + bool IsReady() const; + + // Returns `DataRate::PlusInfinity` if no BWE can be calculated. + DataRate GetBandwidthEstimate() const; + + void SetAcknowledgedBitrate(DataRate acknowledged_bitrate); + void SetBandwidthEstimate(DataRate bandwidth_estimate); + + void UpdateBandwidthEstimate( + rtc::ArrayView<const PacketResult> packet_results, + DataRate delay_based_estimate); + + private: + struct ChannelParameters { + double inherent_loss = 0.0; + DataRate loss_limited_bandwidth = DataRate::MinusInfinity(); + }; + + struct Config { + double bandwidth_rampup_upper_bound_factor = 0.0; + double rampup_acceleration_max_factor = 0.0; + TimeDelta rampup_acceleration_maxout_time = TimeDelta::Zero(); + std::vector<double> candidate_factors; + double higher_bandwidth_bias_factor = 0.0; + double higher_log_bandwidth_bias_factor = 0.0; + double inherent_loss_lower_bound = 0.0; + DataRate inherent_loss_upper_bound_bandwidth_balance = + DataRate::MinusInfinity(); + double inherent_loss_upper_bound_offset = 0.0; + double initial_inherent_loss_estimate = 0.0; + int newton_iterations = 0; + double newton_step_size = 0.0; + bool append_acknowledged_rate_candidate = true; + bool append_delay_based_estimate_candidate = false; + TimeDelta observation_duration_lower_bound = TimeDelta::Zero(); + int observation_window_size = 0; + double sending_rate_smoothing_factor = 0.0; + double instant_upper_bound_temporal_weight_factor = 0.0; + DataRate instant_upper_bound_bandwidth_balance = DataRate::MinusInfinity(); + double instant_upper_bound_loss_offset = 0.0; + double temporal_weight_factor = 0.0; + }; + + struct Derivatives { + double first = 0.0; + double second = 0.0; + }; + + struct Observation { + bool IsInitialized() const { return id != -1; } + + int num_packets = 0; + int num_lost_packets = 0; + int num_received_packets = 0; + DataRate sending_rate = DataRate::MinusInfinity(); + int id = -1; + }; + + struct PartialObservation { + int num_packets = 0; + int num_lost_packets = 0; + DataSize size = DataSize::Zero(); + }; + + static absl::optional<Config> CreateConfig( + const WebRtcKeyValueConfig* key_value_config); + bool IsConfigValid() const; + + // Returns `0.0` if not enough loss statistics have been received. + double GetAverageReportedLossRatio() const; + std::vector<ChannelParameters> GetCandidates( + DataRate delay_based_estimate) const; + DataRate GetCandidateBandwidthUpperBound() const; + Derivatives GetDerivatives(const ChannelParameters& channel_parameters) const; + double GetFeasibleInherentLoss( + const ChannelParameters& channel_parameters) const; + double GetInherentLossUpperBound(DataRate bandwidth) const; + double GetHighBandwidthBias(DataRate bandwidth) const; + double GetObjective(const ChannelParameters& channel_parameters) const; + DataRate GetSendingRate(DataRate instantaneous_sending_rate) const; + DataRate GetInstantUpperBound() const; + void CalculateInstantUpperBound(); + + void CalculateTemporalWeights(); + void NewtonsMethodUpdate(ChannelParameters& channel_parameters) const; + + // Returns false if no observation was created. 
+ bool PushBackObservation(rtc::ArrayView<const PacketResult> packet_results); + + absl::optional<DataRate> acknowledged_bitrate_; + absl::optional<Config> config_; + ChannelParameters current_estimate_; + int num_observations_ = 0; + std::vector<Observation> observations_; + PartialObservation partial_observation_; + Timestamp last_send_time_most_recent_observation_ = Timestamp::PlusInfinity(); + Timestamp last_time_estimate_reduced_ = Timestamp::MinusInfinity(); + absl::optional<DataRate> cached_instant_upper_bound_; + std::vector<double> instant_upper_bound_temporal_weights_; + std::vector<double> temporal_weights_; +}; + +} // namespace webrtc + +#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_LOSS_BASED_BWE_V2_H_ diff --git a/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc b/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc new file mode 100644 index 0000000000..05334885f2 --- /dev/null +++ b/modules/congestion_controller/goog_cc/loss_based_bwe_v2_test.cc @@ -0,0 +1,335 @@ +/* + * Copyright 2021 The WebRTC project authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/congestion_controller/goog_cc/loss_based_bwe_v2.h" + +#include <string> + +#include "api/transport/network_types.h" +#include "api/units/data_rate.h" +#include "api/units/data_size.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" +#include "rtc_base/strings/string_builder.h" +#include "test/explicit_key_value_config.h" +#include "test/gtest.h" + +namespace webrtc { + +namespace { + +using ::webrtc::test::ExplicitKeyValueConfig; + +constexpr TimeDelta kObservationDurationLowerBound = TimeDelta::Millis(200); + +std::string Config(bool enabled, bool valid) { + char buffer[1024]; + rtc::SimpleStringBuilder config_string(buffer); + + config_string << "WebRTC-Bwe-LossBasedBweV2/"; + + if (enabled) { + config_string << "Enabled:true"; + } else { + config_string << "Enabled:false"; + } + + if (valid) { + config_string << ",BwRampupUpperBoundFactor:1.2"; + } else { + config_string << ",BwRampupUpperBoundFactor:0.0"; + } + + config_string + << ",CandidateFactors:0.9|1.1,HigherBwBiasFactor:0.01," "InherentLossLowerBound:0.001,InherentLossUpperBoundBwBalance:14kbps," "InherentLossUpperBoundOffset:0.9,InitialInherentLossEstimate:0.01," "NewtonIterations:2,NewtonStepSize:0.4,ObservationWindowSize:15," "SendingRateSmoothingFactor:0.01," "InstantUpperBoundTemporalWeightFactor:0.97," "InstantUpperBoundBwBalance:90kbps," "InstantUpperBoundLossOffset:0.1,TemporalWeightFactor:0.98"; + + config_string.AppendFormat( ",ObservationDurationLowerBound:%dms", static_cast<int>(kObservationDurationLowerBound.ms())); + + config_string << "/"; + + return config_string.str(); +} + +TEST(LossBasedBweV2Test, EnabledWhenGivenValidConfigurationValues) { + ExplicitKeyValueConfig key_value_config( + Config(/*enabled=*/true, /*valid=*/true)); + LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config); + + EXPECT_TRUE(loss_based_bandwidth_estimator.IsEnabled()); +} + +TEST(LossBasedBweV2Test, DisabledWhenGivenDisabledConfiguration) { + ExplicitKeyValueConfig key_value_config( + Config(/*enabled=*/false, /*valid=*/true)); + LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config); + + 
EXPECT_FALSE(loss_based_bandwidth_estimator.IsEnabled()); +} + +TEST(LossBasedBweV2Test, DisabledWhenGivenNonValidConfigurationValues) { + ExplicitKeyValueConfig key_value_config( + Config(/*enabled=*/true, /*valid=*/false)); + LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config); + + EXPECT_FALSE(loss_based_bandwidth_estimator.IsEnabled()); +} + +TEST(LossBasedBweV2Test, DisabledWhenGivenNonPositiveCandidateFactor) { + ExplicitKeyValueConfig key_value_config_negative_candidate_factor( + "WebRTC-Bwe-LossBasedBweV2/Enabled:true,CandidateFactors:-1.3|1.1/"); + LossBasedBweV2 loss_based_bandwidth_estimator_1( + &key_value_config_negative_candidate_factor); + EXPECT_FALSE(loss_based_bandwidth_estimator_1.IsEnabled()); + + ExplicitKeyValueConfig key_value_config_zero_candidate_factor( + "WebRTC-Bwe-LossBasedBweV2/Enabled:true,CandidateFactors:0.0|1.1/"); + LossBasedBweV2 loss_based_bandwidth_estimator_2( + &key_value_config_zero_candidate_factor); + EXPECT_FALSE(loss_based_bandwidth_estimator_2.IsEnabled()); +} + +TEST(LossBasedBweV2Test, + DisabledWhenGivenConfigurationThatDoesNotAllowGeneratingCandidates) { + ExplicitKeyValueConfig key_value_config( + "WebRTC-Bwe-LossBasedBweV2/" + "Enabled:true,CandidateFactors:1.0,AckedRateCandidate:false," + "DelayBasedCandidate:false/"); + LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config); + EXPECT_FALSE(loss_based_bandwidth_estimator.IsEnabled()); +} + +TEST(LossBasedBweV2Test, BandwidthEstimateGivenInitializationAndThenFeedback) { + PacketResult enough_feedback[2]; + enough_feedback[0].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback[1].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback[0].sent_packet.send_time = Timestamp::Zero(); + enough_feedback[1].sent_packet.send_time = + Timestamp::Zero() + kObservationDurationLowerBound; + enough_feedback[0].receive_time = + Timestamp::Zero() + kObservationDurationLowerBound; + enough_feedback[1].receive_time = + Timestamp::Zero() + 2 * kObservationDurationLowerBound; + + ExplicitKeyValueConfig key_value_config( + Config(/*enabled=*/true, /*valid=*/true)); + LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config); + + loss_based_bandwidth_estimator.SetBandwidthEstimate( + DataRate::KilobitsPerSec(600)); + loss_based_bandwidth_estimator.UpdateBandwidthEstimate( + enough_feedback, DataRate::PlusInfinity()); + + EXPECT_TRUE(loss_based_bandwidth_estimator.IsReady()); + EXPECT_TRUE(loss_based_bandwidth_estimator.GetBandwidthEstimate().IsFinite()); +} + +TEST(LossBasedBweV2Test, NoBandwidthEstimateGivenNoInitialization) { + PacketResult enough_feedback[2]; + enough_feedback[0].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback[1].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback[0].sent_packet.send_time = Timestamp::Zero(); + enough_feedback[1].sent_packet.send_time = + Timestamp::Zero() + kObservationDurationLowerBound; + enough_feedback[0].receive_time = + Timestamp::Zero() + kObservationDurationLowerBound; + enough_feedback[1].receive_time = + Timestamp::Zero() + 2 * kObservationDurationLowerBound; + + ExplicitKeyValueConfig key_value_config( + Config(/*enabled=*/true, /*valid=*/true)); + LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config); + + loss_based_bandwidth_estimator.UpdateBandwidthEstimate( + enough_feedback, DataRate::PlusInfinity()); + + EXPECT_FALSE(loss_based_bandwidth_estimator.IsReady()); + EXPECT_TRUE( + loss_based_bandwidth_estimator.GetBandwidthEstimate().IsPlusInfinity()); +} + 
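Taken together, these tests pin down the intended call sequence for the new estimator: construct it from a key-value config, seed it with SetBandwidthEstimate(), feed transport feedback through UpdateBandwidthEstimate(), and only trust GetBandwidthEstimate() once IsReady() is true. The following is a minimal sketch of that driving loop, reusing this file's Config() helper; the function name, the rates, and the assumption that `feedback` spans at least kObservationDurationLowerBound are illustrative, not part of the patch.

```cpp
// Sketch: how an owner such as a congestion controller might drive
// LossBasedBweV2 through its public API.
DataRate DriveEstimatorOnce(rtc::ArrayView<const PacketResult> feedback) {
  ExplicitKeyValueConfig key_value_config(
      Config(/*enabled=*/true, /*valid=*/true));
  LossBasedBweV2 estimator(&key_value_config);
  if (!estimator.IsEnabled()) {
    return DataRate::PlusInfinity();
  }

  // Seed with an initial estimate, e.g. the current delay based estimate,
  // and the most recent acknowledged rate.
  estimator.SetBandwidthEstimate(DataRate::KilobitsPerSec(600));
  estimator.SetAcknowledgedBitrate(DataRate::KilobitsPerSec(550));

  // Feedback only completes an observation once its send times span at
  // least the configured observation duration lower bound.
  estimator.UpdateBandwidthEstimate(feedback, DataRate::PlusInfinity());

  // DataRate::PlusInfinity() until IsReady(); a finite loss limited
  // bandwidth afterwards.
  return estimator.GetBandwidthEstimate();
}
```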
+TEST(LossBasedBweV2Test, NoBandwidthEstimateGivenNotEnoughFeedback) { + // Create packet results where the observation duration is less than the lower + // bound. + PacketResult not_enough_feedback[2]; + not_enough_feedback[0].sent_packet.size = DataSize::Bytes(15'000); + not_enough_feedback[1].sent_packet.size = DataSize::Bytes(15'000); + not_enough_feedback[0].sent_packet.send_time = Timestamp::Zero(); + not_enough_feedback[1].sent_packet.send_time = + Timestamp::Zero() + kObservationDurationLowerBound / 2; + not_enough_feedback[0].receive_time = + Timestamp::Zero() + kObservationDurationLowerBound / 2; + not_enough_feedback[1].receive_time = + Timestamp::Zero() + kObservationDurationLowerBound; + + ExplicitKeyValueConfig key_value_config( + Config(/*enabled=*/true, /*valid=*/true)); + LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config); + + loss_based_bandwidth_estimator.SetBandwidthEstimate( + DataRate::KilobitsPerSec(600)); + + EXPECT_FALSE(loss_based_bandwidth_estimator.IsReady()); + EXPECT_TRUE( + loss_based_bandwidth_estimator.GetBandwidthEstimate().IsPlusInfinity()); + + loss_based_bandwidth_estimator.UpdateBandwidthEstimate( + not_enough_feedback, DataRate::PlusInfinity()); + + EXPECT_FALSE(loss_based_bandwidth_estimator.IsReady()); + EXPECT_TRUE( + loss_based_bandwidth_estimator.GetBandwidthEstimate().IsPlusInfinity()); +} + +TEST(LossBasedBweV2Test, + SetValueIsTheEstimateUntilAdditionalFeedbackHasBeenReceived) { + PacketResult enough_feedback_1[2]; + PacketResult enough_feedback_2[2]; + enough_feedback_1[0].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback_1[1].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback_2[0].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback_2[1].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback_1[0].sent_packet.send_time = Timestamp::Zero(); + enough_feedback_1[1].sent_packet.send_time = + Timestamp::Zero() + kObservationDurationLowerBound; + enough_feedback_2[0].sent_packet.send_time = + Timestamp::Zero() + 2 * kObservationDurationLowerBound; + enough_feedback_2[1].sent_packet.send_time = + Timestamp::Zero() + 3 * kObservationDurationLowerBound; + enough_feedback_1[0].receive_time = + Timestamp::Zero() + kObservationDurationLowerBound; + enough_feedback_1[1].receive_time = + Timestamp::Zero() + 2 * kObservationDurationLowerBound; + enough_feedback_2[0].receive_time = + Timestamp::Zero() + 3 * kObservationDurationLowerBound; + enough_feedback_2[1].receive_time = + Timestamp::Zero() + 4 * kObservationDurationLowerBound; + + ExplicitKeyValueConfig key_value_config( + Config(/*enabled=*/true, /*valid=*/true)); + LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config); + + loss_based_bandwidth_estimator.SetBandwidthEstimate( + DataRate::KilobitsPerSec(600)); + loss_based_bandwidth_estimator.UpdateBandwidthEstimate( + enough_feedback_1, DataRate::PlusInfinity()); + + EXPECT_NE(loss_based_bandwidth_estimator.GetBandwidthEstimate(), + DataRate::KilobitsPerSec(600)); + + loss_based_bandwidth_estimator.SetBandwidthEstimate( + DataRate::KilobitsPerSec(600)); + + EXPECT_EQ(loss_based_bandwidth_estimator.GetBandwidthEstimate(), + DataRate::KilobitsPerSec(600)); + + loss_based_bandwidth_estimator.UpdateBandwidthEstimate( + enough_feedback_2, DataRate::PlusInfinity()); + + EXPECT_NE(loss_based_bandwidth_estimator.GetBandwidthEstimate(), + DataRate::KilobitsPerSec(600)); +} + +TEST(LossBasedBweV2Test, + SetAcknowledgedBitrateOnlyAffectsTheBweWhenAdditionalFeedbackIsGiven) { + PacketResult 
enough_feedback_1[2]; + PacketResult enough_feedback_2[2]; + enough_feedback_1[0].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback_1[1].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback_2[0].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback_2[1].sent_packet.size = DataSize::Bytes(15'000); + enough_feedback_1[0].sent_packet.send_time = Timestamp::Zero(); + enough_feedback_1[1].sent_packet.send_time = + Timestamp::Zero() + kObservationDurationLowerBound; + enough_feedback_2[0].sent_packet.send_time = + Timestamp::Zero() + 2 * kObservationDurationLowerBound; + enough_feedback_2[1].sent_packet.send_time = + Timestamp::Zero() + 3 * kObservationDurationLowerBound; + enough_feedback_1[0].receive_time = + Timestamp::Zero() + kObservationDurationLowerBound; + enough_feedback_1[1].receive_time = + Timestamp::Zero() + 2 * kObservationDurationLowerBound; + enough_feedback_2[0].receive_time = + Timestamp::Zero() + 3 * kObservationDurationLowerBound; + enough_feedback_2[1].receive_time = + Timestamp::Zero() + 4 * kObservationDurationLowerBound; + + ExplicitKeyValueConfig key_value_config( + Config(/*enabled=*/true, /*valid=*/true)); + LossBasedBweV2 loss_based_bandwidth_estimator_1(&key_value_config); + LossBasedBweV2 loss_based_bandwidth_estimator_2(&key_value_config); + + loss_based_bandwidth_estimator_1.SetBandwidthEstimate( + DataRate::KilobitsPerSec(600)); + loss_based_bandwidth_estimator_2.SetBandwidthEstimate( + DataRate::KilobitsPerSec(600)); + loss_based_bandwidth_estimator_1.UpdateBandwidthEstimate( + enough_feedback_1, DataRate::PlusInfinity()); + loss_based_bandwidth_estimator_2.UpdateBandwidthEstimate( + enough_feedback_1, DataRate::PlusInfinity()); + + EXPECT_EQ(loss_based_bandwidth_estimator_1.GetBandwidthEstimate(), + DataRate::KilobitsPerSec(660)); + + loss_based_bandwidth_estimator_1.SetAcknowledgedBitrate( + DataRate::KilobitsPerSec(600)); + + EXPECT_EQ(loss_based_bandwidth_estimator_1.GetBandwidthEstimate(), + DataRate::KilobitsPerSec(660)); + + loss_based_bandwidth_estimator_1.UpdateBandwidthEstimate( + enough_feedback_2, DataRate::PlusInfinity()); + loss_based_bandwidth_estimator_2.UpdateBandwidthEstimate( + enough_feedback_2, DataRate::PlusInfinity()); + + EXPECT_NE(loss_based_bandwidth_estimator_1.GetBandwidthEstimate(), + loss_based_bandwidth_estimator_2.GetBandwidthEstimate()); +} + +TEST(LossBasedBweV2Test, + BandwidthEstimateIsCappedToBeTcpFairGivenTooHighLossRate) { + PacketResult enough_feedback_no_received_packets[2]; + enough_feedback_no_received_packets[0].sent_packet.size = + DataSize::Bytes(15'000); + enough_feedback_no_received_packets[1].sent_packet.size = + DataSize::Bytes(15'000); + enough_feedback_no_received_packets[0].sent_packet.send_time = + Timestamp::Zero(); + enough_feedback_no_received_packets[1].sent_packet.send_time = + Timestamp::Zero() + kObservationDurationLowerBound; + enough_feedback_no_received_packets[0].receive_time = + Timestamp::PlusInfinity(); + enough_feedback_no_received_packets[1].receive_time = + Timestamp::PlusInfinity(); + + ExplicitKeyValueConfig key_value_config( + Config(/*enabled=*/true, /*valid=*/true)); + LossBasedBweV2 loss_based_bandwidth_estimator(&key_value_config); + + loss_based_bandwidth_estimator.SetBandwidthEstimate( + DataRate::KilobitsPerSec(600)); + loss_based_bandwidth_estimator.UpdateBandwidthEstimate( + enough_feedback_no_received_packets, DataRate::PlusInfinity()); + + EXPECT_EQ(loss_based_bandwidth_estimator.GetBandwidthEstimate(), + DataRate::KilobitsPerSec(100)); +} + +} // 
namespace + +} // namespace webrtc diff --git a/modules/congestion_controller/goog_cc/probe_bitrate_estimator.cc b/modules/congestion_controller/goog_cc/probe_bitrate_estimator.cc index fdfd531135..a94f653157 100644 --- a/modules/congestion_controller/goog_cc/probe_bitrate_estimator.cc +++ b/modules/congestion_controller/goog_cc/probe_bitrate_estimator.cc @@ -119,14 +119,14 @@ absl::optional<DataRate> ProbeBitrateEstimator::HandleProbeAndEstimateBitrate( } return absl::nullopt; } - // Since the |send_interval| does not include the time it takes to actually + // Since the `send_interval` does not include the time it takes to actually // send the last packet the size of the last sent packet should not be // included when calculating the send bitrate. RTC_DCHECK_GT(cluster->size_total, cluster->size_last_send); DataSize send_size = cluster->size_total - cluster->size_last_send; DataRate send_rate = send_size / send_interval; - // Since the |receive_interval| does not include the time it takes to + // Since the `receive_interval` does not include the time it takes to // actually receive the first packet the size of the first received packet // should not be included when calculating the receive bitrate. RTC_DCHECK_GT(cluster->size_total, cluster->size_first_receive); diff --git a/modules/congestion_controller/goog_cc/probe_bitrate_estimator.h b/modules/congestion_controller/goog_cc/probe_bitrate_estimator.h index 9f2f438ad9..d5a523b7f3 100644 --- a/modules/congestion_controller/goog_cc/probe_bitrate_estimator.h +++ b/modules/congestion_controller/goog_cc/probe_bitrate_estimator.h @@ -45,7 +45,7 @@ class ProbeBitrateEstimator { DataSize size_total = DataSize::Zero(); }; - // Erases old cluster data that was seen before |timestamp|. + // Erases old cluster data that was seen before `timestamp`. void EraseOldClusters(Timestamp timestamp); std::map<int, AggregatedCluster> clusters_; diff --git a/modules/congestion_controller/goog_cc/probe_controller.cc b/modules/congestion_controller/goog_cc/probe_controller.cc index 29b472a873..df753ed0c9 100644 --- a/modules/congestion_controller/goog_cc/probe_controller.cc +++ b/modules/congestion_controller/goog_cc/probe_controller.cc @@ -38,7 +38,7 @@ constexpr int kMinProbeDurationMs = 15; // the measured results back. constexpr int64_t kMaxWaitingTimeForProbingResultMs = 1000; -// Value of |min_bitrate_to_probe_further_bps_| that indicates +// Value of `min_bitrate_to_probe_further_bps_` that indicates // further probing is disabled. constexpr int kExponentialProbingDisabled = 0; @@ -46,16 +46,16 @@ constexpr int kExponentialProbingDisabled = 0; // specify max bitrate. constexpr int64_t kDefaultMaxProbingBitrateBps = 5000000; -// If the bitrate drops to a factor |kBitrateDropThreshold| or lower -// and we recover within |kBitrateDropTimeoutMs|, then we'll send -// a probe at a fraction |kProbeFractionAfterDrop| of the original bitrate. +// If the bitrate drops to a factor `kBitrateDropThreshold` or lower +// and we recover within `kBitrateDropTimeoutMs`, then we'll send +// a probe at a fraction `kProbeFractionAfterDrop` of the original bitrate. constexpr double kBitrateDropThreshold = 0.66; constexpr int kBitrateDropTimeoutMs = 5000; constexpr double kProbeFractionAfterDrop = 0.85; // Timeout for probing after leaving ALR. If the bitrate drops significantly, // (as determined by the delay based estimator) and we leave ALR, then we will -// send a probe if we recover within |kLeftAlrTimeoutMs| ms. +// send a probe if we recover within `kLeftAlrTimeoutMs` ms. 
constexpr int kAlrEndedTimeoutMs = 3000; // The expected uncertainty of probe result (as a fraction of the target probe @@ -155,8 +155,8 @@ std::vector<ProbeClusterConfig> ProbeController::SetBitrates( start_bitrate_bps_ = min_bitrate_bps; } - // The reason we use the variable |old_max_bitrate_pbs| is because we - // need to set |max_bitrate_bps_| before we call InitiateProbing. + // The reason we use the variable `old_max_bitrate_bps` is because we + // need to set `max_bitrate_bps_` before we call InitiateProbing. int64_t old_max_bitrate_bps = max_bitrate_bps_; max_bitrate_bps_ = max_bitrate_bps; @@ -225,7 +225,7 @@ std::vector<ProbeClusterConfig> ProbeController::OnMaxTotalAllocatedBitrate( probes.push_back(second_probe_rate.bps()); } return InitiateProbing(at_time_ms, probes, - config_.allocation_allow_further_probing); + config_.allocation_allow_further_probing.Get()); } max_total_allocated_bitrate_ = max_total_allocated_bitrate; return std::vector<ProbeClusterConfig>(); diff --git a/modules/congestion_controller/goog_cc/probe_controller.h b/modules/congestion_controller/goog_cc/probe_controller.h index bcaa293209..d0f1458ece 100644 --- a/modules/congestion_controller/goog_cc/probe_controller.h +++ b/modules/congestion_controller/goog_cc/probe_controller.h @@ -22,7 +22,6 @@ #include "api/transport/network_control.h" #include "api/transport/webrtc_key_value_config.h" #include "api/units/data_rate.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/experiments/field_trial_parser.h" namespace webrtc { @@ -63,6 +62,9 @@ class ProbeController { RtcEventLog* event_log); ~ProbeController(); + ProbeController(const ProbeController&) = delete; + ProbeController& operator=(const ProbeController&) = delete; + ABSL_MUST_USE_RESULT std::vector<ProbeClusterConfig> SetBitrates( int64_t min_bitrate_bps, int64_t start_bitrate_bps, @@ -94,7 +96,7 @@ class ProbeController { void SetMaxBitrate(int64_t max_bitrate_bps); // Resets the ProbeController to a state equivalent to as if it was just - // created EXCEPT for |enable_periodic_alr_probing_|. + // created EXCEPT for `enable_periodic_alr_probing_`. void Reset(int64_t at_time_ms); ABSL_MUST_USE_RESULT std::vector<ProbeClusterConfig> Process( @@ -143,8 +145,6 @@ class ProbeController { int32_t next_probe_cluster_id_ = 1; ProbeControllerConfig config_; - - RTC_DISALLOW_COPY_AND_ASSIGN(ProbeController); }; } // namespace webrtc diff --git a/modules/congestion_controller/goog_cc/probe_controller_unittest.cc b/modules/congestion_controller/goog_cc/probe_controller_unittest.cc index e27bf71489..4e9144f54c 100644 --- a/modules/congestion_controller/goog_cc/probe_controller_unittest.cc +++ b/modules/congestion_controller/goog_cc/probe_controller_unittest.cc @@ -279,7 +279,7 @@ TEST_F(ProbeControllerTest, PeriodicProbingAfterReset) { kMaxBitrateBps, NowMs()); EXPECT_EQ(probes.size(), 2u); - // Make sure we use |kStartBitrateBps| as the estimated bitrate + // Make sure we use `kStartBitrateBps` as the estimated bitrate // until SetEstimatedBitrate is called with an updated estimate. 
clock_.AdvanceTimeMilliseconds(10000); probes = probe_controller_->Process(NowMs()); diff --git a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc index c5f51df99b..5bb145cf20 100644 --- a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc +++ b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc @@ -23,6 +23,7 @@ #include "api/units/data_rate.h" #include "api/units/time_delta.h" #include "logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h" +#include "modules/congestion_controller/goog_cc/loss_based_bwe_v2.h" #include "modules/remote_bitrate_estimator/include/bwe_defines.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" @@ -226,8 +227,9 @@ SendSideBandwidthEstimation::SendSideBandwidthEstimation( low_loss_threshold_(kDefaultLowLossThreshold), high_loss_threshold_(kDefaultHighLossThreshold), bitrate_threshold_(kDefaultBitrateThreshold), - loss_based_bandwidth_estimation_(key_value_config), - receiver_limit_caps_only_("Enabled") { + loss_based_bandwidth_estimator_v1_(key_value_config), + loss_based_bandwidth_estimator_v2_(key_value_config), + disable_receiver_limit_caps_only_("Disabled") { RTC_DCHECK(event_log); if (BweLossExperimentIsEnabled()) { uint32_t bitrate_threshold_kbps; @@ -240,7 +242,7 @@ SendSideBandwidthEstimation::SendSideBandwidthEstimation( bitrate_threshold_ = DataRate::KilobitsPerSec(bitrate_threshold_kbps); } } - ParseFieldTrial({&receiver_limit_caps_only_}, + ParseFieldTrial({&disable_receiver_limit_caps_only_}, key_value_config->Lookup("WebRTC-Bwe-ReceiverLimitCapsOnly")); } @@ -311,7 +313,7 @@ int SendSideBandwidthEstimation::GetMinBitrate() const { DataRate SendSideBandwidthEstimation::target_rate() const { DataRate target = current_target_; - if (receiver_limit_caps_only_) + if (!disable_receiver_limit_caps_only_) target = std::min(target, receiver_limit_); return std::max(min_bitrate_configured_, target); } @@ -341,18 +343,29 @@ void SendSideBandwidthEstimation::SetAcknowledgedRate( absl::optional<DataRate> acknowledged_rate, Timestamp at_time) { acknowledged_rate_ = acknowledged_rate; - if (acknowledged_rate && loss_based_bandwidth_estimation_.Enabled()) { - loss_based_bandwidth_estimation_.UpdateAcknowledgedBitrate( + if (!acknowledged_rate.has_value()) { + return; + } + if (LossBasedBandwidthEstimatorV1Enabled()) { + loss_based_bandwidth_estimator_v1_.UpdateAcknowledgedBitrate( *acknowledged_rate, at_time); } + if (LossBasedBandwidthEstimatorV2Enabled()) { + loss_based_bandwidth_estimator_v2_.SetAcknowledgedBitrate( + *acknowledged_rate); + } } void SendSideBandwidthEstimation::IncomingPacketFeedbackVector( const TransportPacketsFeedback& report) { - if (loss_based_bandwidth_estimation_.Enabled()) { - loss_based_bandwidth_estimation_.UpdateLossStatistics( + if (LossBasedBandwidthEstimatorV1Enabled()) { + loss_based_bandwidth_estimator_v1_.UpdateLossStatistics( report.packet_feedbacks, report.feedback_time); } + if (LossBasedBandwidthEstimatorV2Enabled()) { + loss_based_bandwidth_estimator_v2_.UpdateBandwidthEstimate( + report.packet_feedbacks, delay_based_limit_); + } } void SendSideBandwidthEstimation::UpdatePacketsLost(int64_t packets_lost, @@ -459,13 +472,16 @@ void SendSideBandwidthEstimation::UpdateEstimate(Timestamp at_time) { new_bitrate = std::max(receiver_limit_, new_bitrate); if (delay_based_limit_.IsFinite()) new_bitrate = std::max(delay_based_limit_, new_bitrate); - if 
(loss_based_bandwidth_estimation_.Enabled()) { - loss_based_bandwidth_estimation_.Initialize(new_bitrate); + if (LossBasedBandwidthEstimatorV1Enabled()) { + loss_based_bandwidth_estimator_v1_.Initialize(new_bitrate); + } + if (LossBasedBandwidthEstimatorV2Enabled()) { + loss_based_bandwidth_estimator_v2_.SetBandwidthEstimate(new_bitrate); } if (new_bitrate != current_target_) { min_bitrate_history_.clear(); - if (loss_based_bandwidth_estimation_.Enabled()) { + if (LossBasedBandwidthEstimatorV1Enabled()) { min_bitrate_history_.push_back(std::make_pair(at_time, new_bitrate)); } else { min_bitrate_history_.push_back( @@ -483,14 +499,22 @@ void SendSideBandwidthEstimation::UpdateEstimate(Timestamp at_time) { return; } - if (loss_based_bandwidth_estimation_.InUse()) { - DataRate new_bitrate = loss_based_bandwidth_estimation_.Update( + if (LossBasedBandwidthEstimatorV1ReadyForUse()) { + DataRate new_bitrate = loss_based_bandwidth_estimator_v1_.Update( at_time, min_bitrate_history_.front().second, delay_based_limit_, last_round_trip_time_); UpdateTargetBitrate(new_bitrate, at_time); return; } + if (LossBasedBandwidthEstimatorV2ReadyForUse()) { + DataRate new_bitrate = + loss_based_bandwidth_estimator_v2_.GetBandwidthEstimate(); + new_bitrate = std::min(new_bitrate, delay_based_limit_); + UpdateTargetBitrate(new_bitrate, at_time); + return; + } + TimeDelta time_since_loss_packet_report = at_time - last_loss_packet_report_; if (time_since_loss_packet_report < 1.2 * kMaxRtcpFeedbackInterval) { // We only care about loss above a given bitrate threshold. @@ -585,7 +609,7 @@ void SendSideBandwidthEstimation::UpdateMinHistory(Timestamp at_time) { DataRate SendSideBandwidthEstimation::GetUpperLimit() const { DataRate upper_limit = delay_based_limit_; - if (!receiver_limit_caps_only_) + if (disable_receiver_limit_caps_only_) upper_limit = std::min(upper_limit, receiver_limit_); return std::min(upper_limit, max_bitrate_configured_); } @@ -628,4 +652,26 @@ void SendSideBandwidthEstimation::UpdateTargetBitrate(DataRate new_bitrate, void SendSideBandwidthEstimation::ApplyTargetLimits(Timestamp at_time) { UpdateTargetBitrate(current_target_, at_time); } + +bool SendSideBandwidthEstimation::LossBasedBandwidthEstimatorV1Enabled() const { + return loss_based_bandwidth_estimator_v1_.Enabled() && + !LossBasedBandwidthEstimatorV2Enabled(); +} + +bool SendSideBandwidthEstimation::LossBasedBandwidthEstimatorV1ReadyForUse() + const { + return LossBasedBandwidthEstimatorV1Enabled() && + loss_based_bandwidth_estimator_v1_.InUse(); +} + +bool SendSideBandwidthEstimation::LossBasedBandwidthEstimatorV2Enabled() const { + return loss_based_bandwidth_estimator_v2_.IsEnabled(); +} + +bool SendSideBandwidthEstimation::LossBasedBandwidthEstimatorV2ReadyForUse() + const { + return LossBasedBandwidthEstimatorV2Enabled() && + loss_based_bandwidth_estimator_v2_.IsReady(); +} + } // namespace webrtc diff --git a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h index b97b940db0..f31f30f70d 100644 --- a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h +++ b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h @@ -26,6 +26,7 @@ #include "api/units/time_delta.h" #include "api/units/timestamp.h" #include "modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h" +#include "modules/congestion_controller/goog_cc/loss_based_bwe_v2.h" #include "rtc_base/experiments/field_trial_parser.h" 
namespace webrtc { @@ -134,21 +135,27 @@ class SendSideBandwidthEstimation { // Gets the upper limit for the target bitrate. This is the minimum of the // delay based limit, the receiver limit and the loss based controller limit. DataRate GetUpperLimit() const; - // Prints a warning if |bitrate| if sufficiently long time has past since last + // Prints a warning about `bitrate` if sufficiently long time has passed since the last // warning. void MaybeLogLowBitrateWarning(DataRate bitrate, Timestamp at_time); // Stores an update to the event log if the loss rate has changed, the target // has changed, or sufficient time has passed since last stored event. void MaybeLogLossBasedEvent(Timestamp at_time); - // Cap |bitrate| to [min_bitrate_configured_, max_bitrate_configured_] and - // set |current_bitrate_| to the capped value and updates the event log. + // Cap `bitrate` to [min_bitrate_configured_, max_bitrate_configured_] and + // set `current_bitrate_` to the capped value and update the event log. void UpdateTargetBitrate(DataRate bitrate, Timestamp at_time); // Applies lower and upper bounds to the current target rate. // TODO(srte): This seems to be called even when limits haven't changed, that // should be cleaned up. void ApplyTargetLimits(Timestamp at_time); + bool LossBasedBandwidthEstimatorV1Enabled() const; + bool LossBasedBandwidthEstimatorV2Enabled() const; + + bool LossBasedBandwidthEstimatorV1ReadyForUse() const; + bool LossBasedBandwidthEstimatorV2ReadyForUse() const; + RttBasedBackoff rtt_backoff_; LinkCapacityTracker link_capacity_; @@ -189,8 +196,9 @@ class SendSideBandwidthEstimation { float low_loss_threshold_; float high_loss_threshold_; DataRate bitrate_threshold_; - LossBasedBandwidthEstimation loss_based_bandwidth_estimation_; - FieldTrialFlag receiver_limit_caps_only_; + LossBasedBandwidthEstimation loss_based_bandwidth_estimator_v1_; + LossBasedBweV2 loss_based_bandwidth_estimator_v2_; + FieldTrialFlag disable_receiver_limit_caps_only_; }; } // namespace webrtc #endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_SEND_SIDE_BANDWIDTH_ESTIMATION_H_ diff --git a/modules/congestion_controller/goog_cc/test/goog_cc_printer.cc b/modules/congestion_controller/goog_cc/test/goog_cc_printer.cc index 52baab06c7..6dadf8b9c4 100644 --- a/modules/congestion_controller/goog_cc/test/goog_cc_printer.cc +++ b/modules/congestion_controller/goog_cc/test/goog_cc_printer.cc @@ -87,7 +87,7 @@ std::deque<FieldLogger*> GoogCcStatePrinter::CreateLoggers() { }; auto loss_cont = [&] { return &controller_->bandwidth_estimation_ - ->loss_based_bandwidth_estimation_; + ->loss_based_bandwidth_estimator_v1_; }; std::deque<FieldLogger*> loggers({ Log("time", [=] { return target_.at_time; }), diff --git a/modules/congestion_controller/goog_cc/test/goog_cc_printer.h b/modules/congestion_controller/goog_cc/test/goog_cc_printer.h index 3eee7814cf..16fa657e71 100644 --- a/modules/congestion_controller/goog_cc/test/goog_cc_printer.h +++ b/modules/congestion_controller/goog_cc/test/goog_cc_printer.h @@ -61,7 +61,7 @@ class GoogCcDebugFactory : public GoogCcNetworkControllerFactory { std::unique_ptr<NetworkControllerInterface> Create( NetworkControllerConfig config) override; - void PrintState(const Timestamp at_time); + void PrintState(Timestamp at_time); void AttachWriter(std::unique_ptr<RtcEventLogOutput> log_writer); diff --git a/modules/congestion_controller/goog_cc/trendline_estimator.cc b/modules/congestion_controller/goog_cc/trendline_estimator.cc index 1008badf6a..7fdf66c518 100644 --- a/modules/congestion_controller/goog_cc/trendline_estimator.cc +++ 
b/modules/congestion_controller/goog_cc/trendline_estimator.cc @@ -44,7 +44,7 @@ size_t ReadTrendlineFilterWindowSize( if (parsed_values == 1) { if (window_size > 1) return window_size; - RTC_LOG(WARNING) << "Window size must be greater than 1."; + RTC_LOG(LS_WARNING) << "Window size must be greater than 1."; } RTC_LOG(LS_WARNING) << "Failed to parse parameters for BweWindowSizeInPackets" " experiment from field trial string. Using default."; diff --git a/modules/congestion_controller/goog_cc/trendline_estimator.h b/modules/congestion_controller/goog_cc/trendline_estimator.h index 75b971d187..6fd442498b 100644 --- a/modules/congestion_controller/goog_cc/trendline_estimator.h +++ b/modules/congestion_controller/goog_cc/trendline_estimator.h @@ -20,7 +20,6 @@ #include "api/network_state_predictor.h" #include "api/transport/webrtc_key_value_config.h" #include "modules/congestion_controller/goog_cc/delay_increase_detector_interface.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/experiments/struct_parameters_parser.h" namespace webrtc { @@ -57,6 +56,9 @@ class TrendlineEstimator : public DelayIncreaseDetectorInterface { ~TrendlineEstimator() override; + TrendlineEstimator(const TrendlineEstimator&) = delete; + TrendlineEstimator& operator=(const TrendlineEstimator&) = delete; + // Update the estimator with a new sample. The deltas should represent deltas // between timestamp groups as defined by the InterArrival class. void Update(double recv_delta_ms, @@ -118,8 +120,6 @@ class TrendlineEstimator : public DelayIncreaseDetectorInterface { BandwidthUsage hypothesis_; BandwidthUsage hypothesis_predicted_; NetworkStatePredictor* network_state_predictor_; - - RTC_DISALLOW_COPY_AND_ASSIGN(TrendlineEstimator); }; } // namespace webrtc diff --git a/modules/congestion_controller/include/receive_side_congestion_controller.h b/modules/congestion_controller/include/receive_side_congestion_controller.h index 84661c05b7..fdef7f95c8 100644 --- a/modules/congestion_controller/include/receive_side_congestion_controller.h +++ b/modules/congestion_controller/include/receive_side_congestion_controller.h @@ -60,7 +60,7 @@ class ReceiveSideCongestionController : public CallStatsObserver, void OnBitrateChanged(int bitrate_bps); // Ensures the remote party is notified of the receive bitrate no larger than - // |bitrate| using RTCP REMB. + // `bitrate` using RTCP REMB. void SetMaxDesiredReceiveBitrate(DataRate bitrate); // Implements Module. 
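The TrendlineEstimator change above repeats a pattern applied across this patch (ProbeController earlier, CongestionControlHandler and CroppedDesktopFrame later): the RTC_DISALLOW_COPY_AND_ASSIGN macro and its rtc_base/constructor_magic.h include are replaced by explicitly deleted copy operations. A minimal before/after sketch, with stand-in class names:

```cpp
// Before: the macro expands to a deleted copy constructor and copy
// assignment operator, hidden in the private section and requiring
// rtc_base/constructor_magic.h.
class EstimatorBefore {
 public:
  EstimatorBefore();

 private:
  RTC_DISALLOW_COPY_AND_ASSIGN(EstimatorBefore);
};

// After: the deleted members are spelled out next to the other
// constructors, so the intent is visible at the declaration site and the
// extra include can be dropped.
class EstimatorAfter {
 public:
  EstimatorAfter();

  EstimatorAfter(const EstimatorAfter&) = delete;
  EstimatorAfter& operator=(const EstimatorAfter&) = delete;
};
```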
diff --git a/modules/congestion_controller/pcc/monitor_interval.cc b/modules/congestion_controller/pcc/monitor_interval.cc index 6bc9f4a7ef..de1e2d5e69 100644 --- a/modules/congestion_controller/pcc/monitor_interval.cc +++ b/modules/congestion_controller/pcc/monitor_interval.cc @@ -70,13 +70,10 @@ double PccMonitorInterval::ComputeDelayGradient( return 0; } double sum_times = 0; - double sum_delays = 0; for (const ReceivedPacket& packet : received_packets_) { double time_delta_us = (packet.sent_time - received_packets_[0].sent_time).us(); - double delay = packet.delay.us(); sum_times += time_delta_us; - sum_delays += delay; } double sum_squared_scaled_time_deltas = 0; double sum_scaled_time_delta_dot_delay = 0; diff --git a/modules/congestion_controller/receive_side_congestion_controller_unittest.cc b/modules/congestion_controller/receive_side_congestion_controller_unittest.cc index 5e03179f42..2aade06cbc 100644 --- a/modules/congestion_controller/receive_side_congestion_controller_unittest.cc +++ b/modules/congestion_controller/receive_side_congestion_controller_unittest.cc @@ -28,9 +28,9 @@ namespace webrtc { namespace { // Helper to convert some time format to resolution used in absolute send time -// header extension, rounded upwards. |t| is the time to convert, in some -// resolution. |denom| is the value to divide |t| by to get whole seconds, -// e.g. |denom| = 1000 if |t| is in milliseconds. +// header extension, rounded upwards. `t` is the time to convert, in some +// resolution. `denom` is the value to divide `t` by to get whole seconds, +// e.g. `denom` = 1000 if `t` is in milliseconds. uint32_t AbsSendTime(int64_t t, int64_t denom) { return (((t << 18) + (denom >> 1)) / denom) & 0x00fffffful; } diff --git a/modules/congestion_controller/remb_throttler.h b/modules/congestion_controller/remb_throttler.h index 67c0280749..2f610c1df9 100644 --- a/modules/congestion_controller/remb_throttler.h +++ b/modules/congestion_controller/remb_throttler.h @@ -30,7 +30,7 @@ class RembThrottler : public RemoteBitrateObserver { RembThrottler(RembSender remb_sender, Clock* clock); // Ensures the remote party is notified of the receive bitrate no larger than - // |bitrate| using RTCP REMB. + // `bitrate` using RTCP REMB. 
void SetMaxDesiredReceiveBitrate(DataRate bitrate); // Implements RemoteBitrateObserver; diff --git a/modules/congestion_controller/rtp/BUILD.gn b/modules/congestion_controller/rtp/BUILD.gn index 1a70447307..39d4d68192 100644 --- a/modules/congestion_controller/rtp/BUILD.gn +++ b/modules/congestion_controller/rtp/BUILD.gn @@ -61,6 +61,7 @@ rtc_library("transport_feedback") { "../../../rtc_base:rtc_base_approved", "../../../rtc_base/network:sent_packet", "../../../rtc_base/synchronization:mutex", + "../../../rtc_base/system:no_unique_address", "../../../system_wrappers", "../../../system_wrappers:field_trial", "../../rtp_rtcp:rtp_rtcp_format", diff --git a/modules/congestion_controller/rtp/control_handler.h b/modules/congestion_controller/rtp/control_handler.h index 1da6463219..16ffc32a44 100644 --- a/modules/congestion_controller/rtp/control_handler.h +++ b/modules/congestion_controller/rtp/control_handler.h @@ -19,7 +19,6 @@ #include "api/units/data_size.h" #include "api/units/time_delta.h" #include "modules/pacing/paced_sender.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/system/no_unique_address.h" namespace webrtc { @@ -33,6 +32,9 @@ class CongestionControlHandler { CongestionControlHandler(); ~CongestionControlHandler(); + CongestionControlHandler(const CongestionControlHandler&) = delete; + CongestionControlHandler& operator=(const CongestionControlHandler&) = delete; + void SetTargetRate(TargetTransferRate new_target_rate); void SetNetworkAvailability(bool network_available); void SetPacerQueue(TimeDelta expected_queue_time); @@ -48,7 +50,6 @@ class CongestionControlHandler { int64_t pacer_expected_queue_ms_ = 0; RTC_NO_UNIQUE_ADDRESS SequenceChecker sequenced_checker_; - RTC_DISALLOW_COPY_AND_ASSIGN(CongestionControlHandler); }; } // namespace webrtc #endif // MODULES_CONGESTION_CONTROLLER_RTP_CONTROL_HANDLER_H_ diff --git a/modules/congestion_controller/rtp/transport_feedback_demuxer.cc b/modules/congestion_controller/rtp/transport_feedback_demuxer.cc index 6ab3ad80fa..50987b2302 100644 --- a/modules/congestion_controller/rtp/transport_feedback_demuxer.cc +++ b/modules/congestion_controller/rtp/transport_feedback_demuxer.cc @@ -15,10 +15,17 @@ namespace webrtc { namespace { static const size_t kMaxPacketsInHistory = 5000; } + +TransportFeedbackDemuxer::TransportFeedbackDemuxer() { + // In case the construction thread is different from where the registration + // and callbacks occur, detach from the construction thread. 
+ observer_checker_.Detach(); +} + void TransportFeedbackDemuxer::RegisterStreamFeedbackObserver( std::vector<uint32_t> ssrcs, StreamFeedbackObserver* observer) { - MutexLock lock(&observers_lock_); + RTC_DCHECK_RUN_ON(&observer_checker_); RTC_DCHECK(observer); RTC_DCHECK(absl::c_find_if(observers_, [=](const auto& pair) { return pair.second == observer; @@ -28,7 +35,7 @@ void TransportFeedbackDemuxer::RegisterStreamFeedbackObserver( void TransportFeedbackDemuxer::DeRegisterStreamFeedbackObserver( StreamFeedbackObserver* observer) { - MutexLock lock(&observers_lock_); + RTC_DCHECK_RUN_ON(&observer_checker_); RTC_DCHECK(observer); const auto it = absl::c_find_if( observers_, [=](const auto& pair) { return pair.second == observer; }); @@ -37,7 +44,7 @@ void TransportFeedbackDemuxer::DeRegisterStreamFeedbackObserver( } void TransportFeedbackDemuxer::AddPacket(const RtpPacketSendInfo& packet_info) { - MutexLock lock(&lock_); + RTC_DCHECK_RUN_ON(&observer_checker_); StreamFeedbackObserver::StreamPacketInfo info; info.ssrc = packet_info.media_ssrc; @@ -55,24 +62,22 @@ void TransportFeedbackDemuxer::AddPacket(const RtpPacketSendInfo& packet_info) { void TransportFeedbackDemuxer::OnTransportFeedback( const rtcp::TransportFeedback& feedback) { + RTC_DCHECK_RUN_ON(&observer_checker_); + std::vector<StreamFeedbackObserver::StreamPacketInfo> stream_feedbacks; - { - MutexLock lock(&lock_); - for (const auto& packet : feedback.GetAllPackets()) { - int64_t seq_num = - seq_num_unwrapper_.UnwrapWithoutUpdate(packet.sequence_number()); - auto it = history_.find(seq_num); - if (it != history_.end()) { - auto packet_info = it->second; - packet_info.received = packet.received(); - stream_feedbacks.push_back(packet_info); - if (packet.received()) - history_.erase(it); - } + for (const auto& packet : feedback.GetAllPackets()) { + int64_t seq_num = + seq_num_unwrapper_.UnwrapWithoutUpdate(packet.sequence_number()); + auto it = history_.find(seq_num); + if (it != history_.end()) { + auto packet_info = it->second; + packet_info.received = packet.received(); + stream_feedbacks.push_back(std::move(packet_info)); + if (packet.received()) + history_.erase(it); } } - MutexLock lock(&observers_lock_); for (auto& observer : observers_) { std::vector<StreamFeedbackObserver::StreamPacketInfo> selected_feedback; for (const auto& packet_info : stream_feedbacks) { diff --git a/modules/congestion_controller/rtp/transport_feedback_demuxer.h b/modules/congestion_controller/rtp/transport_feedback_demuxer.h index 634a37ea1a..7f4f5750d2 100644 --- a/modules/congestion_controller/rtp/transport_feedback_demuxer.h +++ b/modules/congestion_controller/rtp/transport_feedback_demuxer.h @@ -14,14 +14,26 @@ #include <map> #include <vector> +#include "api/sequence_checker.h" #include "modules/include/module_common_types_public.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" namespace webrtc { -class TransportFeedbackDemuxer : public StreamFeedbackProvider { +// Implementation of StreamFeedbackProvider that provides a way for +// implementations of StreamFeedbackObserver to register for feedback callbacks +// for a given set of SSRCs. +// Registration methods need to be called from the same execution context +// (thread or task queue) and callbacks to +// StreamFeedbackObserver::OnPacketFeedbackVector will be made in that same +// context. +// TODO(tommi): This appears to be the only implementation of this interface. +// Do we need the interface? 
+class TransportFeedbackDemuxer final : public StreamFeedbackProvider { public: + TransportFeedbackDemuxer(); + // Implements StreamFeedbackProvider interface void RegisterStreamFeedbackObserver( std::vector<uint32_t> ssrcs, @@ -32,17 +44,16 @@ class TransportFeedbackDemuxer : public StreamFeedbackProvider { void OnTransportFeedback(const rtcp::TransportFeedback& feedback); private: - Mutex lock_; - SequenceNumberUnwrapper seq_num_unwrapper_ RTC_GUARDED_BY(&lock_); + RTC_NO_UNIQUE_ADDRESS SequenceChecker observer_checker_; + SequenceNumberUnwrapper seq_num_unwrapper_ RTC_GUARDED_BY(&observer_checker_); std::map<int64_t, StreamFeedbackObserver::StreamPacketInfo> history_ - RTC_GUARDED_BY(&lock_); + RTC_GUARDED_BY(&observer_checker_); // Maps a set of ssrcs to corresponding observer. Vectors are used rather than // set/map to ensure that the processing order is consistent independently of // the randomized ssrcs. - Mutex observers_lock_; std::vector<std::pair<std::vector<uint32_t>, StreamFeedbackObserver*>> - observers_ RTC_GUARDED_BY(&observers_lock_); + observers_ RTC_GUARDED_BY(&observer_checker_); }; } // namespace webrtc diff --git a/modules/desktop_capture/BUILD.gn b/modules/desktop_capture/BUILD.gn index 25b92bed45..25ea1cb05a 100644 --- a/modules/desktop_capture/BUILD.gn +++ b/modules/desktop_capture/BUILD.gn @@ -11,11 +11,6 @@ import("//build/config/ui.gni") import("//tools/generate_stubs/rules.gni") import("../../webrtc.gni") -if (rtc_use_pipewire) { - assert(rtc_pipewire_version == "0.2" || rtc_pipewire_version == "0.3", - "Unsupported PipeWire version") -} - use_desktop_capture_differ_sse2 = current_cpu == "x86" || current_cpu == "x64" config("x11_config") { @@ -82,9 +77,6 @@ if (rtc_include_tests) { "window_finder_unittest.cc", ] public_configs = [ ":x11_config" ] - if (is_win) { - deps += [ "../../rtc_base:win32" ] - } } } @@ -223,19 +215,23 @@ if (is_linux || is_chromeos) { ] } - if (rtc_pipewire_version == "0.3") { - pkg_config("pipewire") { - packages = [ "libpipewire-0.3" ] - if (!rtc_link_pipewire) { - ignore_libs = true - } + pkg_config("pipewire") { + packages = [ "libpipewire-0.3" ] + if (!rtc_link_pipewire) { + ignore_libs = true } - } else { - pkg_config("pipewire") { - packages = [ "libpipewire-0.2" ] - if (!rtc_link_pipewire) { - ignore_libs = true - } + } + + pkg_config("gbm") { + packages = [ "gbm" ] + } + pkg_config("egl") { + packages = [ "egl" ] + } + pkg_config("libdrm") { + packages = [ "libdrm" ] + if (!rtc_link_pipewire) { + ignore_libs = true } } @@ -246,18 +242,18 @@ if (is_linux || is_chromeos) { configs = [ "../../:common_config", ":pipewire", + ":libdrm", ] deps = [ "../../rtc_base" ] - extra_header = "linux/pipewire_stub_header.fragment" + extra_header = "linux/wayland/pipewire_stub_header.fragment" logging_function = "RTC_LOG(LS_VERBOSE)" logging_include = "rtc_base/logging.h" - output_name = "linux/pipewire_stubs" - path_from_source = "modules/desktop_capture/linux" - if (rtc_pipewire_version == "0.3") { - sigs = [ "linux/pipewire03.sigs" ] - } else { - sigs = [ "linux/pipewire02.sigs" ] - } + output_name = "linux/wayland/pipewire_stubs" + path_from_source = "modules/desktop_capture/linux/wayland" + sigs = [ + "linux/wayland/pipewire.sigs", + "linux/wayland/drm.sigs", + ] } } @@ -406,37 +402,37 @@ rtc_library("desktop_capture_generic") { if (build_with_mozilla && (is_linux || is_chromeos)) { sources += [ "app_capturer_linux.cc", - "linux/app_capturer_x11.cc", - "linux/desktop_device_info_linux.cc", - "linux/desktop_device_info_linux.h", - "linux/shared_x_util.cc", - "linux/shared_x_util.h", + "linux/x11/app_capturer_x11.cc", + 
"linux/x11/desktop_device_info_linux.cc", + "linux/x11/desktop_device_info_linux.h", + "linux/x11/shared_x_util.cc", + "linux/x11/shared_x_util.h", ] } } if (rtc_use_x11_extensions) { sources += [ - "linux/mouse_cursor_monitor_x11.cc", - "linux/mouse_cursor_monitor_x11.h", - "linux/screen_capturer_x11.cc", - "linux/screen_capturer_x11.h", - "linux/shared_x_display.cc", - "linux/shared_x_display.h", - "linux/window_capturer_x11.cc", - "linux/window_capturer_x11.h", - "linux/window_finder_x11.cc", - "linux/window_finder_x11.h", - "linux/window_list_utils.cc", - "linux/window_list_utils.h", - "linux/x_atom_cache.cc", - "linux/x_atom_cache.h", - "linux/x_error_trap.cc", - "linux/x_error_trap.h", - "linux/x_server_pixel_buffer.cc", - "linux/x_server_pixel_buffer.h", - "linux/x_window_property.cc", - "linux/x_window_property.h", + "linux/x11/mouse_cursor_monitor_x11.cc", + "linux/x11/mouse_cursor_monitor_x11.h", + "linux/x11/screen_capturer_x11.cc", + "linux/x11/screen_capturer_x11.h", + "linux/x11/shared_x_display.cc", + "linux/x11/shared_x_display.h", + "linux/x11/window_capturer_x11.cc", + "linux/x11/window_capturer_x11.h", + "linux/x11/window_finder_x11.cc", + "linux/x11/window_finder_x11.h", + "linux/x11/window_list_utils.cc", + "linux/x11/window_list_utils.h", + "linux/x11/x_atom_cache.cc", + "linux/x11/x_atom_cache.h", + "linux/x11/x_error_trap.cc", + "linux/x11/x_error_trap.h", + "linux/x11/x_server_pixel_buffer.cc", + "linux/x11/x_server_pixel_buffer.h", + "linux/x11/x_window_property.cc", + "linux/x11/x_window_property.h", ] libs = [ "X11", @@ -533,7 +529,10 @@ rtc_library("desktop_capture_generic") { "d3d11.lib", "dxgi.lib", ] - deps += [ "../../rtc_base:win32" ] + deps += [ + "../../rtc_base:rtc_base_approved", + "../../rtc_base:win32", + ] } absl_deps = [ @@ -558,19 +557,35 @@ rtc_library("desktop_capture_generic") { if (rtc_use_pipewire) { sources += [ - "linux/base_capturer_pipewire.cc", - "linux/base_capturer_pipewire.h", + "linux/wayland/base_capturer_pipewire.cc", + "linux/wayland/base_capturer_pipewire.h", + "linux/wayland/egl_dmabuf.cc", + "linux/wayland/egl_dmabuf.h", + "linux/wayland/mouse_cursor_monitor_pipewire.cc", + "linux/wayland/mouse_cursor_monitor_pipewire.h", + "linux/wayland/scoped_glib.cc", + "linux/wayland/scoped_glib.h", + "linux/wayland/screencast_portal.cc", + "linux/wayland/screencast_portal.h", + "linux/wayland/shared_screencast_stream.cc", + "linux/wayland/shared_screencast_stream.h", ] configs += [ - ":pipewire_config", ":gio", ":pipewire", + ":gbm", + ":egl", + ":libdrm", ] if (!rtc_link_pipewire) { deps += [ ":pipewire_stubs" ] } + + public_configs += [ ":pipewire_config" ] + + deps += [ "../../rtc_base:sanitizer" ] } if (rtc_enable_win_wgc) { diff --git a/modules/desktop_capture/OWNERS b/modules/desktop_capture/OWNERS index 79df492e69..e3bc32ee5c 100644 --- a/modules/desktop_capture/OWNERS +++ b/modules/desktop_capture/OWNERS @@ -1,2 +1,2 @@ -jamiewalch@chromium.org -joedow@chromium.org +alcooper@chromium.org +mfoltz@chromium.org diff --git a/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h b/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h index 46ba5257fe..f5c2ce201b 100644 --- a/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h +++ b/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h @@ -31,8 +31,8 @@ class BlankDetectorDesktopCapturerWrapper final public DesktopCapturer::Callback { public: // Creates BlankDetectorDesktopCapturerWrapper. 
-  // takes ownership of |capturer|. The |blank_pixel| is the unmodified color
-  // returned by the |capturer|.
+  // takes ownership of `capturer`. The `blank_pixel` is the unmodified color
+  // returned by the `capturer`.
   BlankDetectorDesktopCapturerWrapper(std::unique_ptr<DesktopCapturer> capturer,
                                       RgbaColor blank_pixel);
   ~BlankDetectorDesktopCapturerWrapper() override;
@@ -55,7 +55,7 @@ class BlankDetectorDesktopCapturerWrapper final
 
   bool IsBlankFrame(const DesktopFrame& frame) const;
 
-  // Detects whether pixel at (x, y) equals to |blank_pixel_|.
+  // Detects whether the pixel at (x, y) equals `blank_pixel_`.
   bool IsBlankPixel(const DesktopFrame& frame, int x, int y) const;
 
   const std::unique_ptr<DesktopCapturer> capturer_;
diff --git a/modules/desktop_capture/cropped_desktop_frame.cc b/modules/desktop_capture/cropped_desktop_frame.cc
index 1ab0355075..54488b7d62 100644
--- a/modules/desktop_capture/cropped_desktop_frame.cc
+++ b/modules/desktop_capture/cropped_desktop_frame.cc
@@ -15,7 +15,6 @@
 #include "modules/desktop_capture/desktop_region.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/constructor_magic.h"
 
 namespace webrtc {
 
@@ -25,10 +24,11 @@ class CroppedDesktopFrame : public DesktopFrame {
   CroppedDesktopFrame(std::unique_ptr<DesktopFrame> frame,
                       const DesktopRect& rect);
 
+  CroppedDesktopFrame(const CroppedDesktopFrame&) = delete;
+  CroppedDesktopFrame& operator=(const CroppedDesktopFrame&) = delete;
+
  private:
   const std::unique_ptr<DesktopFrame> frame_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(CroppedDesktopFrame);
 };
 
 std::unique_ptr<DesktopFrame> CreateCroppedDesktopFrame(
@@ -36,7 +36,9 @@ std::unique_ptr<DesktopFrame> CreateCroppedDesktopFrame(
     const DesktopRect& rect) {
   RTC_DCHECK(frame);
 
-  if (!DesktopRect::MakeSize(frame->size()).ContainsRect(rect)) {
+  DesktopRect intersection = DesktopRect::MakeSize(frame->size());
+  intersection.IntersectWith(rect);
+  if (intersection.is_empty()) {
     return nullptr;
   }
 
@@ -45,7 +47,7 @@ std::unique_ptr<DesktopFrame> CreateCroppedDesktopFrame(
   }
 
   return std::unique_ptr<DesktopFrame>(
-      new CroppedDesktopFrame(std::move(frame), rect));
+      new CroppedDesktopFrame(std::move(frame), intersection));
 }
 
 CroppedDesktopFrame::CroppedDesktopFrame(std::unique_ptr<DesktopFrame> frame,
diff --git a/modules/desktop_capture/cropped_desktop_frame.h b/modules/desktop_capture/cropped_desktop_frame.h
index bbc3c86cad..5c672c7d32 100644
--- a/modules/desktop_capture/cropped_desktop_frame.h
+++ b/modules/desktop_capture/cropped_desktop_frame.h
@@ -19,11 +19,11 @@
 namespace webrtc {
 
-// Creates a DesktopFrame to contain only the area of |rect| in the original
-// |frame|.
-// |frame| should not be nullptr. |rect| is in |frame| coordinate, i.e.
-// |frame|->top_left() does not impact the area of |rect|.
-// Returns nullptr frame if |rect| is not contained by the bounds of |frame|.
+// Creates a DesktopFrame that contains only the area of `rect` in the
+// original `frame`.
+// `frame` must not be nullptr. `rect` is in `frame` coordinates, i.e.
+// `frame`->top_left() does not impact the area of `rect`.
+// `rect` is clamped to the bounds of `frame`; returns nullptr only if the
+// clamped `rect` is empty, i.e. `rect` does not intersect `frame` at all.
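+//
+// For example (illustrative, based on the unit tests below, which use a 10x20
+// source frame): cropping with DesktopRect::MakeWH(10, 20) returns the frame
+// unchanged, cropping with DesktopRect::MakeWH(11, 10) is clamped to the
+// frame bounds and yields a 10x10 frame, and a rect lying entirely outside
+// the frame yields nullptr.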
 std::unique_ptr<DesktopFrame> RTC_EXPORT
 CreateCroppedDesktopFrame(std::unique_ptr<DesktopFrame> frame,
                           const DesktopRect& rect);
diff --git a/modules/desktop_capture/cropped_desktop_frame_unittest.cc b/modules/desktop_capture/cropped_desktop_frame_unittest.cc
index c4ccb87596..9becf69636 100644
--- a/modules/desktop_capture/cropped_desktop_frame_unittest.cc
+++ b/modules/desktop_capture/cropped_desktop_frame_unittest.cc
@@ -25,16 +25,21 @@ std::unique_ptr<DesktopFrame> CreateTestFrame() {
 
 TEST(CroppedDesktopFrameTest, DoNotCreateWrapperIfSizeIsNotChanged) {
   std::unique_ptr<DesktopFrame> original = CreateTestFrame();
-  // owned by |original| and CroppedDesktopFrame.
+  // Owned by `original` and CroppedDesktopFrame.
   DesktopFrame* raw_original = original.get();
   std::unique_ptr<DesktopFrame> cropped = CreateCroppedDesktopFrame(
       std::move(original), DesktopRect::MakeWH(10, 20));
   ASSERT_EQ(cropped.get(), raw_original);
 }
 
-TEST(CroppedDesktopFrameTest, ReturnNullptrIfSizeIsNotSufficient) {
-  ASSERT_EQ(nullptr, CreateCroppedDesktopFrame(CreateTestFrame(),
-                                               DesktopRect::MakeWH(11, 10)));
+TEST(CroppedDesktopFrameTest, CropWhenPartiallyOutOfBounds) {
+  std::unique_ptr<DesktopFrame> cropped =
+      CreateCroppedDesktopFrame(CreateTestFrame(), DesktopRect::MakeWH(11, 10));
+  ASSERT_NE(nullptr, cropped);
+  ASSERT_EQ(cropped->size().width(), 10);
+  ASSERT_EQ(cropped->size().height(), 10);
+  ASSERT_EQ(cropped->top_left().x(), 0);
+  ASSERT_EQ(cropped->top_left().y(), 0);
 }
 
 TEST(CroppedDesktopFrameTest, ReturnNullIfCropRegionIsOutOfBounds) {
diff --git a/modules/desktop_capture/cropping_window_capturer.cc b/modules/desktop_capture/cropping_window_capturer.cc
index bd1ba46315..5e0faaade9 100644
--- a/modules/desktop_capture/cropping_window_capturer.cc
+++ b/modules/desktop_capture/cropping_window_capturer.cc
@@ -99,9 +99,16 @@ void CroppingWindowCapturer::OnCaptureResult(
     return;
   }
 
-  callback_->OnCaptureResult(
-      Result::SUCCESS,
-      CreateCroppedDesktopFrame(std::move(screen_frame), window_rect));
+  std::unique_ptr<DesktopFrame> cropped_frame =
+      CreateCroppedDesktopFrame(std::move(screen_frame), window_rect);
+
+  if (!cropped_frame) {
+    RTC_LOG(LS_WARNING) << "Window is outside of the captured display";
+    callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr);
+    return;
+  }
+
+  callback_->OnCaptureResult(Result::SUCCESS, std::move(cropped_frame));
 }
 
 bool CroppingWindowCapturer::IsOccluded(const DesktopVector& pos) {
diff --git a/modules/desktop_capture/cropping_window_capturer.h b/modules/desktop_capture/cropping_window_capturer.h
index 272a196972..56478030b1 100644
--- a/modules/desktop_capture/cropping_window_capturer.h
+++ b/modules/desktop_capture/cropping_window_capturer.h
@@ -45,7 +45,7 @@ class RTC_EXPORT CroppingWindowCapturer : public DesktopCapturer,
   bool FocusOnSelectedSource() override;
   bool IsOccluded(const DesktopVector& pos) override;
 
-  // DesktopCapturer::Callback implementation, passed to |screen_capturer_| to
+  // DesktopCapturer::Callback implementation, passed to `screen_capturer_` to
   // intercept the capture result.
   void OnCaptureResult(DesktopCapturer::Result result,
                        std::unique_ptr<DesktopFrame> frame) override;
diff --git a/modules/desktop_capture/cropping_window_capturer_win.cc b/modules/desktop_capture/cropping_window_capturer_win.cc
index 31ddbe1b33..64d9219e24 100644
--- a/modules/desktop_capture/cropping_window_capturer_win.cc
+++ b/modules/desktop_capture/cropping_window_capturer_win.cc
@@ -15,7 +15,7 @@
 #include "modules/desktop_capture/win/window_capture_utils.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/trace_event.h"
-#include "rtc_base/win32.h"
+#include "rtc_base/win/windows_version.h"
 
 namespace webrtc {
 
@@ -118,7 +118,7 @@ struct TopWindowVerifierContext : public SelectedWindowContext {
       // firing an assert when enabled, report that the selected window isn't
      // topmost to avoid inadvertent capture of other windows.
      RTC_LOG(LS_ERROR) << "Failed to enumerate windows: " << lastError;
-      RTC_NOTREACHED();
+      RTC_DCHECK_NOTREACHED();
      return false;
    }
  }
@@ -196,7 +196,8 @@ void CroppingWindowCapturerWin::CaptureFrame() {
 }
 
 bool CroppingWindowCapturerWin::ShouldUseScreenCapturer() {
-  if (!rtc::IsWindows8OrLater() && window_capture_helper_.IsAeroEnabled()) {
+  if (rtc::rtc_win::GetVersion() < rtc::rtc_win::Version::VERSION_WIN8 &&
+      window_capture_helper_.IsAeroEnabled()) {
     return false;
   }
 
@@ -247,7 +248,7 @@ bool CroppingWindowCapturerWin::ShouldUseScreenCapturer() {
   }
 
   if (region_type == SIMPLEREGION) {
-    // The |region_rect| returned from GetRgnBox() is always in window
+    // The `region_rect` returned from GetRgnBox() is always in window
     // coordinates.
     region_rect.Translate(window_region_rect_.left(),
                           window_region_rect_.top());
@@ -255,10 +256,10 @@ bool CroppingWindowCapturerWin::ShouldUseScreenCapturer() {
     // system permits drawing.
     // https://msdn.microsoft.com/en-us/library/windows/desktop/dd144950(v=vs.85).aspx.
     //
-    // |region_rect| should always be inside of |window_region_rect_|. So after
-    // the intersection, |window_region_rect_| == |region_rect|. If so, what's
+    // `region_rect` should always be inside of `window_region_rect_`. So after
+    // the intersection, `window_region_rect_` == `region_rect`. If so, what's
     // the point of the intersecting operations? Why can't we directly retrieve
-    // |window_region_rect_| from GetWindowRegionTypeWithBoundary() function?
+    // `window_region_rect_` from the GetWindowRegionTypeWithBoundary() function?
     // TODO(zijiehe): Figure out the purpose of these intersections.
     window_region_rect_.IntersectWith(region_rect);
     content_rect.IntersectWith(region_rect);
@@ -266,14 +267,14 @@ bool CroppingWindowCapturerWin::ShouldUseScreenCapturer() {
 
   // Check if the client area is out of the screen area. When the window is
   // maximized, only its client area is visible in the screen, the border will
-  // be hidden. So we are using |content_rect| here.
+  // be hidden. So we are using `content_rect` here.
   if (!GetFullscreenRect().ContainsRect(content_rect)) {
     return false;
   }
 
   // Check if the window is occluded by any other window, excluding the child
-  // windows, context menus, and |excluded_window_|.
-  // |content_rect| is preferred, see the comments on
+  // windows, context menus, and `excluded_window_`.
+  // `content_rect` is preferred, see the comments on
   // IsWindowIntersectWithSelectedWindow().
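+  // Illustrative note (not a behavior change): if another application's window
+  // overlaps the selected window, the verification below reports the selected
+  // window as not topmost, ShouldUseScreenCapturer() returns false, and
+  // CroppingWindowCapturer falls back to the plain window capturer instead of
+  // cropping a screen frame that would include the occluding window.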
   TopWindowVerifierContext context(selected,
                                    reinterpret_cast<HWND>(excluded_window()),
@@ -293,7 +294,7 @@ DesktopRect CroppingWindowCapturerWin::GetWindowRectInVirtualScreen() {
   }
   window_rect.IntersectWith(window_region_rect_);
 
-  // Convert |window_rect| to be relative to the top-left of the virtual screen.
+  // Convert `window_rect` to be relative to the top-left of the virtual screen.
   DesktopRect screen_rect(GetFullscreenRect());
   window_rect.IntersectWith(screen_rect);
   window_rect.Translate(-screen_rect.left(), -screen_rect.top());
diff --git a/modules/desktop_capture/desktop_and_cursor_composer.cc b/modules/desktop_capture/desktop_and_cursor_composer.cc
index 69b8b40c73..7ca0af038c 100644
--- a/modules/desktop_capture/desktop_and_cursor_composer.cc
+++ b/modules/desktop_capture/desktop_and_cursor_composer.cc
@@ -21,7 +21,6 @@
 #include "modules/desktop_capture/mouse_cursor.h"
 #include "modules/desktop_capture/mouse_cursor_monitor.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/constructor_magic.h"
 
 namespace webrtc {
 
@@ -64,7 +63,7 @@ void AlphaBlend(uint8_t* dest,
 // content before releasing the underlying frame.
 class DesktopFrameWithCursor : public DesktopFrame {
  public:
-  // Takes ownership of |frame|.
+  // Takes ownership of `frame`.
   DesktopFrameWithCursor(std::unique_ptr<DesktopFrame> frame,
                          const MouseCursor& cursor,
                          const DesktopVector& position,
@@ -72,6 +71,9 @@ class DesktopFrameWithCursor : public DesktopFrame {
                          bool cursor_changed);
   ~DesktopFrameWithCursor() override;
 
+  DesktopFrameWithCursor(const DesktopFrameWithCursor&) = delete;
+  DesktopFrameWithCursor& operator=(const DesktopFrameWithCursor&) = delete;
+
   DesktopRect cursor_rect() const { return cursor_rect_; }
 
  private:
@@ -80,8 +82,6 @@ class DesktopFrameWithCursor : public DesktopFrame {
   DesktopVector restore_position_;
   std::unique_ptr<DesktopFrame> restore_frame_;
   DesktopRect cursor_rect_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(DesktopFrameWithCursor);
 };
 
 DesktopFrameWithCursor::DesktopFrameWithCursor(
     std::unique_ptr<DesktopFrame> frame,
@@ -113,7 +113,7 @@ DesktopFrameWithCursor::DesktopFrameWithCursor(
   if (cursor_rect_.is_empty())
     return;
 
-  // Copy original screen content under cursor to |restore_frame_|.
+  // Copy the original screen content under the cursor to `restore_frame_`.
   restore_position_ = cursor_rect_.top_left();
   restore_frame_.reset(new BasicDesktopFrame(cursor_rect_.size()));
   restore_frame_->CopyPixelsFrom(*this, cursor_rect_.top_left(),
@@ -218,7 +218,7 @@ void DesktopAndCursorComposer::OnCaptureResult(
   // and location in logical(DIP) pixels on Retina monitor. This will cause
   // problem when the desktop is mixed with Retina and non-Retina monitors.
   // So we use DIP pixel for all location info and compensate with the scale
-  // factor of current frame to the |relative_position|.
+  // factor of the current frame to adjust `relative_position`.
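+  // For example (illustrative): with a Retina scale factor of 2.0, a cursor
+  // reported at DIP (100, 50) is translated to (200, 100) in this frame's
+  // physical pixel coordinates.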
   const float scale = frame->scale_factor();
   relative_position.set(relative_position.x() * scale,
                         relative_position.y() * scale);
diff --git a/modules/desktop_capture/desktop_and_cursor_composer.h b/modules/desktop_capture/desktop_and_cursor_composer.h
index 8f95721ec2..edb764d168 100644
--- a/modules/desktop_capture/desktop_and_cursor_composer.h
+++ b/modules/desktop_capture/desktop_and_cursor_composer.h
@@ -21,7 +21,6 @@
 #include "modules/desktop_capture/mouse_cursor.h"
 #include "modules/desktop_capture/mouse_cursor_monitor.h"
 #include "modules/desktop_capture/shared_memory.h"
-#include "rtc_base/constructor_magic.h"
 #include "rtc_base/system/rtc_export.h"
 
 namespace webrtc {
 
@@ -35,12 +34,15 @@ class RTC_EXPORT DesktopAndCursorComposer
  public:
   // Creates a new composer that captures mouse cursor using
   // MouseCursorMonitor::Create(options) and renders it into the frames
-  // generated by |desktop_capturer|.
+  // generated by `desktop_capturer`.
   DesktopAndCursorComposer(std::unique_ptr<DesktopCapturer> desktop_capturer,
                            const DesktopCaptureOptions& options);
 
   ~DesktopAndCursorComposer() override;
 
+  DesktopAndCursorComposer(const DesktopAndCursorComposer&) = delete;
+  DesktopAndCursorComposer& operator=(const DesktopAndCursorComposer&) = delete;
+
   // Creates a new composer that relies on an external source for cursor shape
   // and position information via the MouseCursorMonitor::Callback interface.
   static std::unique_ptr<DesktopAndCursorComposer>
@@ -84,8 +86,6 @@ class RTC_EXPORT DesktopAndCursorComposer
   DesktopVector cursor_position_;
   DesktopRect previous_cursor_rect_;
   bool cursor_changed_ = false;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(DesktopAndCursorComposer);
 };
 
 }  // namespace webrtc
diff --git a/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc b/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc
index 00253d38e2..5596576d9e 100644
--- a/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc
+++ b/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc
@@ -201,7 +201,7 @@ class DesktopAndCursorComposerTest : public ::testing::Test,
   }
 
  protected:
-  // Owned by |blender_|.
+  // Owned by `blender_`.
   FakeScreenCapturer* fake_screen_;
   FakeMouseMonitor* fake_cursor_;
 
@@ -280,7 +280,7 @@ TEST_F(DesktopAndCursorComposerTest, CursorShouldBeIgnoredIfFrameMayContainIt) {
     fake_cursor_->SetState(MouseCursorMonitor::INSIDE, abs_pos);
     blender_.CaptureFrame();
-    // If the frame may already have contained the cursor, then |CaptureFrame()|
+    // If the frame may already have contained the cursor, then `CaptureFrame()`
     // should not have modified it, so it should be the same as the control.
     EXPECT_TRUE(frame_);
     const DesktopVector rel_pos(abs_pos.subtract(control_frame->top_left()));
@@ -291,7 +291,7 @@ TEST_F(DesktopAndCursorComposerTest, CursorShouldBeIgnoredIfFrameMayContainIt) {
             *reinterpret_cast<uint32_t*>(
                 control_frame->GetFrameDataAtPos(rel_pos)));
   } else {
-    // |CaptureFrame()| should have modified the frame to have the cursor.
+    // `CaptureFrame()` should have modified the frame to have the cursor.
     EXPECT_NE(
         *reinterpret_cast<uint32_t*>(frame_->GetFrameDataAtPos(rel_pos)),
         *reinterpret_cast<uint32_t*>(
diff --git a/modules/desktop_capture/desktop_capture_options.cc b/modules/desktop_capture/desktop_capture_options.cc
index c89896d5fd..ab7932195c 100644
--- a/modules/desktop_capture/desktop_capture_options.cc
+++ b/modules/desktop_capture/desktop_capture_options.cc
@@ -14,6 +14,11 @@
 #elif defined(WEBRTC_WIN)
 #include "modules/desktop_capture/win/full_screen_win_application_handler.h"
 #endif
+#if defined(WEBRTC_USE_PIPEWIRE)
+#include "modules/desktop_capture/linux/wayland/shared_screencast_stream.h"
+#endif
+
+#include "rtc_base/ref_counted_object.h"
 
 namespace webrtc {
 
@@ -35,13 +40,19 @@ DesktopCaptureOptions DesktopCaptureOptions::CreateDefault() {
 #if defined(WEBRTC_USE_X11)
   result.set_x_display(SharedXDisplay::CreateDefault());
 #endif
+#if defined(WEBRTC_USE_PIPEWIRE)
+  result.set_screencast_stream(SharedScreenCastStream::CreateDefault());
+#endif
 #if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
-  result.set_configuration_monitor(new DesktopConfigurationMonitor());
+  result.set_configuration_monitor(
+      rtc::make_ref_counted<DesktopConfigurationMonitor>());
   result.set_full_screen_window_detector(
-      new FullScreenWindowDetector(CreateFullScreenMacApplicationHandler));
+      rtc::make_ref_counted<FullScreenWindowDetector>(
+          CreateFullScreenMacApplicationHandler));
 #elif defined(WEBRTC_WIN)
   result.set_full_screen_window_detector(
-      new FullScreenWindowDetector(CreateFullScreenWinApplicationHandler));
+      rtc::make_ref_counted<FullScreenWindowDetector>(
+          CreateFullScreenWinApplicationHandler));
 #endif
   return result;
 }
diff --git a/modules/desktop_capture/desktop_capture_options.h b/modules/desktop_capture/desktop_capture_options.h
index a693803aa0..c6bc52f8a7 100644
--- a/modules/desktop_capture/desktop_capture_options.h
+++ b/modules/desktop_capture/desktop_capture_options.h
@@ -14,7 +14,11 @@
 #include "rtc_base/system/rtc_export.h"
 
 #if defined(WEBRTC_USE_X11)
-#include "modules/desktop_capture/linux/shared_x_display.h"
+#include "modules/desktop_capture/linux/x11/shared_x_display.h"
+#endif
+
+#if defined(WEBRTC_USE_PIPEWIRE)
+#include "modules/desktop_capture/linux/wayland/shared_screencast_stream.h"
 #endif
 
 #if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
@@ -43,7 +47,9 @@ class RTC_EXPORT DesktopCaptureOptions {
   DesktopCaptureOptions& operator=(DesktopCaptureOptions&& options);
 
 #if defined(WEBRTC_USE_X11)
-  SharedXDisplay* x_display() const { return x_display_; }
+  const rtc::scoped_refptr<SharedXDisplay>& x_display() const {
+    return x_display_;
+  }
   void set_x_display(rtc::scoped_refptr<SharedXDisplay> x_display) {
     x_display_ = x_display;
   }
@@ -53,7 +59,8 @@ class RTC_EXPORT DesktopCaptureOptions {
   // TODO(zijiehe): Remove both DesktopConfigurationMonitor and
   // FullScreenChromeWindowDetector out of DesktopCaptureOptions. It's not
   // reasonable for external consumers to set these two parameters.
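+  // Because the getter now returns a const scoped_refptr reference, a consumer
+  // that needs the monitor beyond the lifetime of this options object can
+  // simply retain it (illustrative):
+  //   rtc::scoped_refptr<DesktopConfigurationMonitor> monitor =
+  //       options.configuration_monitor();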
-  DesktopConfigurationMonitor* configuration_monitor() const {
+  const rtc::scoped_refptr<DesktopConfigurationMonitor>& configuration_monitor()
+      const {
     return configuration_monitor_;
   }
   // If nullptr is set, ScreenCapturer won't work and WindowCapturer may return
@@ -67,7 +74,8 @@ class RTC_EXPORT DesktopCaptureOptions {
   void set_allow_iosurface(bool allow) { allow_iosurface_ = allow; }
 #endif
 
-  FullScreenWindowDetector* full_screen_window_detector() const {
+  const rtc::scoped_refptr<FullScreenWindowDetector>&
+  full_screen_window_detector() const {
     return full_screen_window_detector_;
   }
   void set_full_screen_window_detector(
@@ -100,13 +108,13 @@ class RTC_EXPORT DesktopCaptureOptions {
 #if defined(WEBRTC_WIN)
   // Enumerating windows owned by the current process on Windows has some
   // complications due to |GetWindowText*()| APIs potentially causing a
-  // deadlock (see the comments in the |GetWindowListHandler()| function in
+  // deadlock (see the comments in the `GetWindowListHandler()` function in
   // window_capture_utils.cc for more details on the deadlock).
   // To avoid this issue, consumers can either ensure that the thread that runs
-  // their message loop never waits on |GetSourceList()|, or they can set this
+  // their message loop never waits on `GetSourceList()`, or they can set this
   // flag to false which will prevent windows running in the current process
   // from being enumerated and included in the results. Consumers can still
-  // provide the WindowId for their own windows to |SelectSource()| and capture
+  // provide the WindowId for their own windows to `SelectSource()` and capture
   // them.
   bool enumerate_current_process_windows() const {
     return enumerate_current_process_windows_;
@@ -161,13 +169,26 @@ class RTC_EXPORT DesktopCaptureOptions {
 #if defined(WEBRTC_USE_PIPEWIRE)
   bool allow_pipewire() const { return allow_pipewire_; }
   void set_allow_pipewire(bool allow) { allow_pipewire_ = allow; }
+
+  const rtc::scoped_refptr<SharedScreenCastStream>& screencast_stream() const {
+    return screencast_stream_;
+  }
+  void set_screencast_stream(
+      rtc::scoped_refptr<SharedScreenCastStream> stream) {
+    screencast_stream_ = stream;
+  }
 #endif
 
  private:
 #if defined(WEBRTC_USE_X11)
   rtc::scoped_refptr<SharedXDisplay> x_display_;
 #endif
-
+#if defined(WEBRTC_USE_PIPEWIRE)
+  // An instance of the shared PipeWire ScreenCast stream, shared between
+  // BaseCapturerPipeWire and MouseCursorMonitorPipeWire because the cursor
+  // information is sent together with the screen content.
+  rtc::scoped_refptr<SharedScreenCastStream> screencast_stream_;
+#endif
 #if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
   rtc::scoped_refptr<DesktopConfigurationMonitor> configuration_monitor_;
   bool allow_iosurface_ = false;
diff --git a/modules/desktop_capture/desktop_capture_types.h b/modules/desktop_capture/desktop_capture_types.h
index 5f9966bb6d..bc26db7cc4 100644
--- a/modules/desktop_capture/desktop_capture_types.h
+++ b/modules/desktop_capture/desktop_capture_types.h
@@ -27,9 +27,14 @@ const WindowId kNullWindowId = 0;
 // - On Windows: integer display device index.
 // - On OSX: CGDirectDisplayID cast to intptr_t.
 // - On Linux (with X11): TBD.
+// - On ChromeOS: display::Display::id() is an int64_t.
 // On Windows, ScreenId is implementation dependent: sending a ScreenId from one
 // implementation to another usually won't work correctly.
-typedef intptr_t ScreenId;
+#if defined(CHROMEOS)
+typedef int64_t ScreenId;
+#else
+typedef intptr_t ScreenId;
+#endif
 
 // The screen id corresponds to all screens combined together.
 const ScreenId kFullDesktopScreenId = -1;
 
@@ -39,7 +44,7 @@ const ScreenId kInvalidScreenId = -2;
 
 // Integers to attach to each DesktopFrame to differentiate the generator of
 // the frame. The entries in this namespace should remain in sync with the
 // SequentialDesktopCapturerId enum, which is logged via UMA.
-// |kScreenCapturerWinGdi| and |kScreenCapturerWinDirectx| values are preserved
+// `kScreenCapturerWinGdi` and `kScreenCapturerWinDirectx` values are preserved
 // to maintain compatibility.
 namespace DesktopCapturerId {
 constexpr uint32_t CreateFourCC(char a, char b, char c, char d) {
diff --git a/modules/desktop_capture/desktop_capturer.cc b/modules/desktop_capture/desktop_capturer.cc
index 735aa4d530..9e6b99ac58 100644
--- a/modules/desktop_capture/desktop_capturer.cc
+++ b/modules/desktop_capture/desktop_capturer.cc
@@ -79,7 +79,7 @@ std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateScreenCapturer(
     const DesktopCaptureOptions& options) {
 #if defined(RTC_ENABLE_WIN_WGC)
   if (options.allow_wgc_capturer() &&
-      rtc::rtc_win::GetVersion() >= rtc::rtc_win::Version::VERSION_WIN10_RS5) {
+      rtc::rtc_win::GetVersion() >= rtc::rtc_win::Version::VERSION_WIN10_20H1) {
     return WgcCapturerWin::CreateRawScreenCapturer(options);
   }
 #endif  // defined(RTC_ENABLE_WIN_WGC)
diff --git a/modules/desktop_capture/desktop_capturer.h b/modules/desktop_capture/desktop_capturer.h
index 64339c0ca6..822a75d947 100644
--- a/modules/desktop_capture/desktop_capturer.h
+++ b/modules/desktop_capture/desktop_capturer.h
@@ -50,8 +50,8 @@ class RTC_EXPORT DesktopCapturer {
   // Interface that must be implemented by the DesktopCapturer consumers.
   class Callback {
    public:
-    // Called after a frame has been captured. |frame| is not nullptr if and
-    // only if |result| is SUCCESS.
+    // Called after a frame has been captured. `frame` is not nullptr if and
+    // only if `result` is SUCCESS.
     virtual void OnCaptureResult(Result result,
                                  std::unique_ptr<DesktopFrame> frame) = 0;
 
@@ -59,7 +59,11 @@ class RTC_EXPORT DesktopCapturer {
     virtual ~Callback() {}
   };
 
+#if defined(CHROMEOS)
+  typedef int64_t SourceId;
+#else
   typedef intptr_t SourceId;
+#endif
 
   static_assert(std::is_same<SourceId, ScreenId>::value,
                 "SourceId should be the same type as ScreenId.");
@@ -77,7 +81,7 @@ class RTC_EXPORT DesktopCapturer {
 
   virtual ~DesktopCapturer();
 
-  // Called at the beginning of a capturing session. |callback| must remain
+  // Called at the beginning of a capturing session. `callback` must remain
   // valid until the capturer is destroyed.
   virtual void Start(Callback* callback) = 0;
 
@@ -120,11 +124,11 @@ class RTC_EXPORT DesktopCapturer {
   // implementation does not support this functionality.
   virtual bool FocusOnSelectedSource();
 
-  // Returns true if the |pos| on the selected source is covered by other
+  // Returns true if the `pos` on the selected source is covered by other
   // elements on the display, and is not visible to the users.
-  // |pos| is in full desktop coordinates, i.e. the top-left monitor always
+  // `pos` is in full desktop coordinates, i.e. the top-left monitor always
   // starts from (0, 0).
-  // The return value if |pos| is out of the scope of the source is undefined.
+  // The return value is undefined if `pos` is outside the bounds of the source.
   virtual bool IsOccluded(const DesktopVector& pos);
 
   // Creates a DesktopCapturer instance that captures windows.
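Taken together, the `Callback` contract above (`frame` is non-null if and only if `result` is SUCCESS) and the new ERROR_TEMPORARY path added in cropping_window_capturer.cc suggest the shape of a minimal consumer. The sketch below is illustrative only; the FrameSink class and the CaptureOnce() helper are invented for this example and are not part of the change:

    #include <memory>

    #include "modules/desktop_capture/desktop_capture_options.h"
    #include "modules/desktop_capture/desktop_capturer.h"
    #include "modules/desktop_capture/desktop_frame.h"

    class FrameSink : public webrtc::DesktopCapturer::Callback {
     public:
      void OnCaptureResult(
          webrtc::DesktopCapturer::Result result,
          std::unique_ptr<webrtc::DesktopFrame> frame) override {
        if (result != webrtc::DesktopCapturer::Result::SUCCESS) {
          // `frame` is null on both error paths. ERROR_TEMPORARY (e.g. the
          // cropped window moved off the captured display) allows retrying;
          // ERROR_PERMANENT requires recreating the capturer.
          return;
        }
        // Consume frame->data() / frame->stride() here.
      }
    };

    void CaptureOnce() {
      webrtc::DesktopCaptureOptions options =
          webrtc::DesktopCaptureOptions::CreateDefault();
      std::unique_ptr<webrtc::DesktopCapturer> capturer =
          webrtc::DesktopCapturer::CreateScreenCapturer(options);
      FrameSink sink;
      capturer->Start(&sink);    // `sink` must outlive `capturer`.
      capturer->CaptureFrame();  // Result is delivered via OnCaptureResult().
    }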
diff --git a/modules/desktop_capture/desktop_capturer_differ_wrapper.cc b/modules/desktop_capture/desktop_capturer_differ_wrapper.cc
index 4e80f30520..916d76f7ce 100644
--- a/modules/desktop_capture/desktop_capturer_differ_wrapper.cc
+++ b/modules/desktop_capture/desktop_capturer_differ_wrapper.cc
@@ -25,8 +25,8 @@
 namespace webrtc {
 
 namespace {
 
-// Returns true if (0, 0) - (|width|, |height|) vector in |old_buffer| and
-// |new_buffer| are equal. |width| should be less than 32
+// Returns true if the (0, 0) - (`width`, `height`) area of `old_buffer` and
+// `new_buffer` is equal. `width` should be less than 32
 // (defined by kBlockSize), otherwise BlockDifference() should be used.
 bool PartialBlockDifference(const uint8_t* old_buffer,
                             const uint8_t* new_buffer,
@@ -45,9 +45,9 @@ bool PartialBlockDifference(const uint8_t* old_buffer,
   return false;
 }
 
-// Compares columns in the range of [|left|, |right|), in a row in the
-// range of [|top|, |top| + |height|), starts from |old_buffer| and
-// |new_buffer|, and outputs updated regions into |output|. |stride| is the
+// Compares columns in the range [`left`, `right`), in rows in the
+// range [`top`, `top` + `height`), starting from `old_buffer` and
+// `new_buffer`, and outputs updated regions into `output`. `stride` is the
 // DesktopFrame::stride().
 void CompareRow(const uint8_t* old_buffer,
                 const uint8_t* new_buffer,
@@ -68,7 +68,7 @@ void CompareRow(const uint8_t* old_buffer,
   // The first block-column in a continuous dirty area in the current block-row.
   int first_dirty_x_block = -1;
 
-  // We always need to add dirty area into |output| in the last block, so handle
+  // We always need to add the dirty area into `output` in the last block, so handle
   // it separately.
   for (int x = 0; x < block_count; x++) {
     if (BlockDifference(old_buffer, new_buffer, height, stride)) {
@@ -109,8 +109,8 @@ void CompareRow(const uint8_t* old_buffer,
   }
 }
 
-// Compares |rect| area in |old_frame| and |new_frame|, and outputs dirty
-// regions into |output|.
+// Compares the `rect` area in `old_frame` and `new_frame`, and outputs dirty
+// regions into `output`.
 void CompareFrames(const DesktopFrame& old_frame,
                    const DesktopFrame& new_frame,
                    DesktopRect rect,
diff --git a/modules/desktop_capture/desktop_capturer_differ_wrapper_unittest.cc b/modules/desktop_capture/desktop_capturer_differ_wrapper_unittest.cc
index d16390dee4..9ccef3cc10 100644
--- a/modules/desktop_capture/desktop_capturer_differ_wrapper_unittest.cc
+++ b/modules/desktop_capture/desktop_capturer_differ_wrapper_unittest.cc
@@ -29,9 +29,9 @@ namespace webrtc {
 
 namespace {
 
-// Compares and asserts |frame|.updated_region() equals to |rects|. This
-// function does not care about the order of the |rects| and it does not expect
-// DesktopRegion to return an exact area for each rectangle in |rects|.
+// Compares and asserts that `frame`.updated_region() equals `rects`. This
+// function does not care about the order of the `rects`, and it does not
+// expect DesktopRegion to return an exact area for each rectangle in `rects`.
 template